from flocker.node.agents.blockdevice import (
VolumeException, AlreadyAttachedVolume,
UnknownVolume, UnattachedVolume,
IBlockDeviceAPI, _blockdevicevolume_from_dataset_id,
BlockDeviceVolume
)
from uuid import uuid4, UUID
from zope.interface import implementer
from twisted.python.filepath import FilePath
from huawei_oceanstor_flocker_plugin import rest_client
from huawei_oceanstor_flocker_plugin import huawei_utils
from huawei_oceanstor_flocker_plugin.log import LOG
import json
@implementer(IBlockDeviceAPI)
class HuaweiBlockDeviceAPI(object):
"""
Huawei driver implementing ``IBlockDeviceAPI``.
"""
def __init__(self, cluster_id, huawei_conf_file,
compute_instance_id=None,
allocation_unit=None):
"""
:param cluster_id: An ID that is included in the
    names of Huawei volumes to identify the cluster.
:param huawei_conf_file: The path of the Huawei config file.
:param compute_instance_id: An ID used to create the
    host on the array to identify the node.
:param allocation_unit: Allocation unit on the array.
"""
LOG.info("Huawei block device init")
self._host_id = None
self._hostgroup_id = None
self.xml_file_path = huawei_conf_file
self.configuration = huawei_utils.get_login_info(
self.xml_file_path)
self.restclient = rest_client.RestClient(self.configuration)
self.restclient.login()
if compute_instance_id is None:
compute_instance_id = huawei_utils.get_instance_id(
self.xml_file_path)
self._compute_instance_id = compute_instance_id
self._cluster_id = cluster_id
if allocation_unit is None:
allocation_unit = 512
self._allocation_unit = allocation_unit
LOG.info("Finish huawei block device init")
def allocation_unit(self):
"""
The size, in bytes, up to which ``IDeployer`` will round volume
sizes before calling ``IBlockDeviceAPI.create_volume``.
:returns: ``int``
"""
LOG.info("Call allocation_unit")
return self._allocation_unit
def compute_instance_id(self):
"""
Get an identifier for this node.
This will be compared against ``BlockDeviceVolume.attached_to``
to determine which volumes are locally attached and it will be used
with ``attach_volume`` to locally attach volumes.
:returns: A ``unicode`` object giving a provider-specific node
identifier which identifies the node where the method is run.
"""
LOG.info("Call compute_instance_id = %s" % self._compute_instance_id)
return unicode(self._compute_instance_id)
def create_volume(self, dataset_id, size):
"""
Create a new volume.
When called by ``IDeployer``, the supplied size will be
rounded up to the nearest ``IBlockDeviceAPI.allocation_unit()``
:param UUID dataset_id: The Flocker dataset ID of the dataset on this
volume.
:param int size: The size of the new volume in bytes.
:returns: A ``BlockDeviceVolume``.
"""
LOG.info("Call create_volume, dataset_id=%s, size=%d"
% (dataset_id, size))
name = huawei_utils.encode_name(dataset_id, self._cluster_id)
parameters = huawei_utils.get_lun_conf_params(self.xml_file_path)
if parameters is None:
raise VolumeException
pool_name = huawei_utils.get_pools(self.xml_file_path)
if pool_name is None:
raise VolumeException
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name,
pools)
lun_param = {"TYPE": '11',
"NAME": name,
"PARENTTYPE": '216',
"PARENTID": pool_info['ID'],
"ALLOCTYPE": parameters['LUNType'],
"CAPACITY": str((size/512)),
"WRITEPOLICY": parameters['WriteType'],
"MIRRORPOLICY": parameters['MirrorSwitch'],
"PREFETCHPOLICY": parameters['PrefetchType'],
"PREFETCHVALUE": parameters['PrefetchValue'],
"DATATRANSFERPOLICY": parameters['policy'],
"READCACHEPOLICY": parameters['readcachepolicy'],
"WRITECACHEPOLICY": parameters['writecachepolicy']}
url = "/lun"
data = json.dumps(lun_param)
result = self.restclient.call(url, data)
lun_info = result['data']
volume = BlockDeviceVolume(
size=int(lun_info['CAPACITY'])*512,
attached_to=None,
dataset_id=huawei_utils.decode_name(lun_info['NAME'],
self._cluster_id),
blockdevice_id=unicode(lun_info['ID'])
)
return volume
def destroy_volume(self, blockdevice_id):
"""
Destroy an existing volume.
:param unicode blockdevice_id: The unique identifier for the volume to
destroy.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:return: ``None``
"""
LOG.info("Call destroy_volume blockdevice_id=%s" % blockdevice_id)
try:
self.restclient.delete_lun(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
def initialize_connection_iscsi(self):
"""
TODO: Initialize iscsi connection.
"""
initiator_name = huawei_utils.iscsi_get_initiator()
if initiator_name is None:
raise VolumeException
# Create the host if it does not exist.
host_id = self.restclient.add_host_with_check(
self._compute_instance_id)
# Add initiator to the host.
self.restclient.ensure_initiator_added(self.xml_file_path,
initiator_name,
host_id)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self._host_id = host_id
self._hostgroup_id = hostgroup_id
def initialize_connection_fc(self):
"""
TODO: Initialize fc connection.
"""
wwns = huawei_utils.get_fc_wwpns()
if not wwns:
raise VolumeException
# Create the host if it does not exist.
host_id = self.restclient.add_host_with_check(
self._compute_instance_id)
online_wwns_in_host = (
self.restclient.get_host_online_fc_initiators(host_id))
online_free_wwns = self.restclient.get_online_free_wwns()
for wwn in wwns:
if (wwn not in online_wwns_in_host and
wwn not in online_free_wwns):
wwns_in_host = (
self.restclient.get_host_initiators("fc", host_id))
iqns_in_host = (
self.restclient.get_host_initiators("iscsi", host_id))
if not wwns_in_host and not iqns_in_host:
self.restclient.remove_host(host_id)
LOG.error('Can not add FC initiator to host.')
raise VolumeException
for wwn in wwns:
if wwn in online_free_wwns:
self.restclient.add_fc_port_to_host(host_id, wwn)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self._host_id = host_id
self._hostgroup_id = hostgroup_id
def initialize_connection(self):
protocol = huawei_utils.get_protocol_info(self.xml_file_path)
if protocol is None:
raise VolumeException
if protocol == 'iSCSI':
self.initialize_connection_iscsi()
else:
self.initialize_connection_fc()
def attach_volume(self, blockdevice_id, attach_to):
"""
Attach ``blockdevice_id`` to the node indicated by ``attach_to``.
:param unicode blockdevice_id: The unique identifier for the block
device being attached.
:param unicode attach_to: An identifier like the one returned by the
``compute_instance_id`` method indicating the node to which to
attach the volume.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
already attached.
:returns: A ``BlockDeviceVolume`` with an ``attached_to`` attribute set
to ``attach_to``.
"""
LOG.info("Call attach_volume blockdevice_id=%s, attach_to=%s"
% (blockdevice_id, attach_to))
try:
lun_info = self.restclient.get_lun_info(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
if lun_info['EXPOSEDTOINITIATOR'].lower() == 'true':
raise AlreadyAttachedVolume(blockdevice_id)
self.initialize_connection()
self.restclient.do_mapping(blockdevice_id, self._hostgroup_id,
self._host_id)
huawei_utils.rescan_scsi()
lun_info = self.restclient.get_lun_info(blockdevice_id)
attached_volume = BlockDeviceVolume(
size=int(lun_info['CAPACITY'])*512,
attached_to=unicode(attach_to),
dataset_id=huawei_utils.decode_name(
lun_info['NAME'], self._cluster_id),
blockdevice_id=blockdevice_id)
return attached_volume
def detach_volume(self, blockdevice_id):
"""
Detach ``blockdevice_id`` from whatever host it is attached to.
:param unicode blockdevice_id: The unique identifier for the block
device being detached.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to anything.
:returns: ``None``
"""
LOG.info("Call detach_volume blockdevice_id=%s" % blockdevice_id)
device = self.get_device_path(blockdevice_id)
if device is not None:
huawei_utils.remove_scsi_device(device)
lun_info = self.restclient.get_lun_info(blockdevice_id)
if self.get_attached_to(lun_info) is not None:
self.restclient.delete_mapping(
blockdevice_id, self._compute_instance_id)
else:
LOG.error("Volume %s not attached." % blockdevice_id)
raise UnattachedVolume(blockdevice_id)
def get_attached_to(self, item):
"""
TODO: Find a way to save the attach_to information.
"""
LOG.info("Call get_attached_to")
if item['ISADD2LUNGROUP'] == 'true':
result = self.restclient.get_host_of_lun_map(item['ID'])
if 'data' in result:
return result['data'][0]['NAME']
return None
def list_volumes(self):
"""
List all the block devices available via the back end API.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
LOG.info("Call list_volumes")
volumes = []
url = "/lun?range=[0-65535]"
result = self.restclient.call(url, None, "GET")
if 'data' in result:
for item in result['data']:
if huawei_utils.is_cluster_volume(
item['NAME'], self._cluster_id):
volume = BlockDeviceVolume(
size=int(item['CAPACITY'])*512,
attached_to=self.get_attached_to(item),
dataset_id=huawei_utils.decode_name(
item['NAME'], self._cluster_id),
blockdevice_id=unicode(item['ID'])
)
volumes.append(volume)
return volumes
def get_device_path(self, blockdevice_id):
"""
Return the device path that has been allocated to the block device on
the host to which it is currently attached.
:param unicode blockdevice_id: The unique identifier for the block
device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
:returns: A ``FilePath`` for the device.
"""
LOG.info("Call get_device_path")
# No multipath support: match the LUN WWN against local block devices.
try:
lun_info = self.restclient.get_lun_info(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
if lun_info['EXPOSEDTOINITIATOR'].lower() == 'false':
raise UnattachedVolume(blockdevice_id)
lun_wwn = lun_info['WWN']
for bd in huawei_utils.get_all_block_device():
bd_wwn = huawei_utils.get_wwn_of_deviceblock(bd)
if bd_wwn is not None and lun_wwn in bd_wwn:
LOG.info("device_path finded: %s" % bd)
return FilePath("/dev/"+bd)
return None
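# Hypothetical usage sketch (not part of the original driver): one way a
# caller might exercise the API above. The config path and sizes below are
# placeholder assumptions, and a reachable OceanStor array is required.
if __name__ == '__main__':
    api = HuaweiBlockDeviceAPI(
        cluster_id=uuid4(),
        huawei_conf_file='/etc/flocker/huawei_config.xml')
    # Sizes are normally pre-rounded to the allocation unit by the deployer.
    new_volume = api.create_volume(dataset_id=uuid4(),
                                   size=api.allocation_unit() * 2048)
    # Attach to this node, then resolve the local device path.
    api.attach_volume(new_volume.blockdevice_id, api.compute_instance_id())
    print(api.get_device_path(new_volume.blockdevice_id))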
| {
"content_hash": "5553dfc3a39206771f2e1d39aaa35b3e",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 78,
"avg_line_length": 36.831491712707184,
"alnum_prop": 0.5795394884872122,
"repo_name": "huaweistorage/huawei-oceanstor-flocker-plugin",
"id": "e1ac600aaa28c04ab2049308bd5d620f1f9cf548",
"size": "13417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huawei_oceanstor_flocker_plugin/huawei_oceanstor_blockdevice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59885"
}
],
"symlink_target": ""
} |
from app import app, db, models
from flask import jsonify
available_regions = ['Nelson', 'Dunedin', 'Hamilton', 'Christchurch', 'Gisborne', 'Greymouth', 'Wanganui', 'Tekapo', 'Wellington', 'Turangi', 'Whangarei', 'Alexandra', 'Auckland', 'Rotorua', 'Havelock Nth']
@app.route('/region')
def return_available():
return jsonify(available_regions)
@app.route('/region/<string:region>', methods=['GET'])
def get_region(region):
if region in available_regions:
query = models.Point.query.filter(models.Point.region == region)
return jsonify([point.json() for point in query.all()])
else:
return jsonify([])
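# Hypothetical smoke test (not part of the original app): exercises the two
# routes above through Flask's built-in test client, so no running server is
# needed; the per-region query still assumes the database behind
# `models.Point` is reachable.
if __name__ == '__main__':
    with app.test_client() as client:
        print(client.get('/region').data)          # JSON list of region names
        print(client.get('/region/Dunedin').data)  # JSON list of points, or []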
| {
"content_hash": "86f85911b629be9d2fa430beeb31f104",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 206,
"avg_line_length": 34.05263157894737,
"alnum_prop": 0.6723338485316847,
"repo_name": "herrjemand/WaterGo2016",
"id": "0e37a88587ad88444f262acdee5e5063568a3e7f",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main_routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159645"
},
{
"name": "HTML",
"bytes": "5277"
},
{
"name": "JavaScript",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "5737"
}
],
"symlink_target": ""
} |
from stupid.chatbot import ChatBot, trigger
from stupid.quotes import QuotesDatabase
from stupid.utils import weekday
class QuoteBot(ChatBot):
def __init__(self, *args, **kwargs):
super(QuoteBot, self).__init__(*args, **kwargs)
self.schedule.every().day.at("9:25").do(self.post_quote)
self.registry = QuotesDatabase()
@trigger
def on_bash(self):
quote = self.registry.get_random()
self.registry.mark_as_shown(quote)
return ">>>" + quote.text
@weekday
def post_quote(self):
quote = self.registry.get_random()
self.broker.post(">>>" + quote.text)
self.registry.mark_as_shown(quote)
| {
"content_hash": "d6ef8a2aec415c4870036c08e852da23",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.6331360946745562,
"repo_name": "peterdemin/stupid",
"id": "50d7b7f7d624d8f2f4cc9cc038a35ca50db6ed1f",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stupid/quotebot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38471"
}
],
"symlink_target": ""
} |
"""
sinecosine.py
Author: Payton
Credit: Morgan, Mr. Dennison
Assignment:
In this assignment you must use *list comprehensions* to generate sprites that show the behavior
of certain mathematical functions: sine and cosine.
The sine and cosine functions are provided in the Python math library. These functions are used
to relate *angles* to *rectangular* (x,y) coordinate systems and can be very useful in computer
game design.
Unlike the last assignment using ggame, this one will not provide any "skeleton" code to fill
in. You should use your submission for the Picture assignment
(https://github.com/HHS-IntroProgramming/Picture) as a reference for starting this assignment.
See:
https://github.com/HHS-IntroProgramming/Sine-Cosine/blob/master/README.md
for a detailed list of requirements for this assignment.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Programmed-Graphics
for general information on using list comprehensions to generate graphics.
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
import math
from ggame import App, Color, LineStyle, Sprite
from ggame import CircleAsset
red = Color(0xff0000, 1.0)
purple = Color(0x8500B5, 1.0)
blue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
noline = LineStyle(0, black)
sine = CircleAsset(5, noline, blue)
xcoordinates = range(0, 360, 10)
cosine = CircleAsset(5, noline, red)
radianc = CircleAsset(5, noline, purple)
sineg = [Sprite(sine, (x, 100+100*math.sin(math.radians(x)))) for x in xcoordinates]
cosineg = [Sprite(cosine, (x, 100+100*math.cos(math.radians(x)))) for x in xcoordinates]
radiancg = [Sprite(radianc, (100+100*math.cos(math.radians(x)), (400+100*math.sin(math.radians(x))))) for x in xcoordinates]
myapp = App()
myapp.run()
| {
"content_hash": "548019941ab5ef10b330a64c961d859c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 124,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.7714884696016772,
"repo_name": "phstearns/Sine-Cosine",
"id": "a8077565b0ddd4b3946bb8f3020ef8e17d75d576",
"size": "1908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sinecosine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73095"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
no_dry_run = True
def forwards(self, orm):
"""
Plugin Data migrations are done in 0010. This will test that the
migrations passed correctly.
"""
hipchat_alert = orm['cabot_alert_hipchat.HipchatAlert'].objects.get()
twilio_sms_alert = orm['cabot_alert_twilio.TwilioSMS'].objects.get()
twilio_phone_alert = orm['cabot_alert_twilio.TwilioPhoneCall'].objects.get()
email_alert = orm['cabot_alert_email.EmailAlert'].objects.get()
for service in orm.Service.objects.all():
if service.hipchat_alert:
assert service.alerts.filter(title="Hipchat").count() == 1
if service.email_alert:
assert service.alerts.filter(title="Email").count() == 1
if service.sms_alert:
assert service.alerts.filter(title="Twilio SMS").count() == 1
if service.telephone_alert:
assert service.alerts.filter(title="Twilio Phone Call").count() == 1
for user in orm.UserProfile.objects.all():
assert orm['cabot_alert_hipchat.hipchatalertuserdata'].objects.get(user=user).hipchat_alias == user.hipchat_alias
assert orm['cabot_alert_twilio.twiliouserdata'].objects.get(user=user).phone_number == user.mobile_number
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cabot_alert_email.emailalert': {
'Meta': {'object_name': 'EmailAlert', '_ormbases': [u'cabotapp.AlertPlugin']},
u'alertplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'cabot_alert_hipchat.hipchatalert': {
'Meta': {'object_name': 'HipchatAlert', '_ormbases': [u'cabotapp.AlertPlugin']},
u'alertplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'cabot_alert_hipchat.hipchatalertuserdata': {
'Meta': {'object_name': 'HipchatAlertUserData', '_ormbases': [u'cabotapp.AlertPluginUserData']},
u'alertpluginuserdata_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPluginUserData']", 'unique': 'True', 'primary_key': 'True'}),
'hipchat_alias': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'cabot_alert_twilio.twiliophonecall': {
'Meta': {'object_name': 'TwilioPhoneCall', '_ormbases': [u'cabotapp.AlertPlugin']},
u'alertplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'cabot_alert_twilio.twiliosms': {
'Meta': {'object_name': 'TwilioSMS', '_ormbases': [u'cabotapp.AlertPlugin']},
u'alertplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'cabot_alert_twilio.twiliouserdata': {
'Meta': {'object_name': 'TwilioUserData', '_ormbases': [u'cabotapp.AlertPluginUserData']},
u'alertpluginuserdata_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cabotapp.AlertPluginUserData']", 'unique': 'True', 'primary_key': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'cabotapp.alertplugin': {
'Meta': {'object_name': 'AlertPlugin'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertplugin_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cabotapp.alertpluginuserdata': {
'Meta': {'unique_together': "(('title', 'user'),)", 'object_name': 'AlertPluginUserData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertpluginuserdata_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.UserProfile']"})
},
u'cabotapp.instance': {
'Meta': {'ordering': "['name']", 'object_name': 'Instance'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
u'cabotapp.instancestatussnapshot': {
'Meta': {'object_name': 'InstanceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Instance']"}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'cabotapp.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.Instance']", 'symmetrical': 'False', 'blank': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
u'cabotapp.servicestatussnapshot': {
'Meta': {'object_name': 'ServiceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Service']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'cabotapp.shift': {
'Meta': {'object_name': 'Shift'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabotapp.statuscheck': {
'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'verify_ssl_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'cabotapp.statuscheckresult': {
'Meta': {'object_name': 'StatusCheckResult'},
'check': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.StatusCheck']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
},
u'cabotapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabot_alert_hipchat', 'cabot_alert_email', 'cabot_alert_twilio', 'cabotapp']
symmetrical = True
| {
"content_hash": "320a272170893f17a808ffde4cc1048c",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 211,
"avg_line_length": 80.2304347826087,
"alnum_prop": 0.5693383189725247,
"repo_name": "spladug/cabot",
"id": "fc0d02fe2bb378e1d46616a13c1d34efac358d0b",
"size": "18477",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "cabot/cabotapp/migrations/0011_plugin_data_migration_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21910"
},
{
"name": "HTML",
"bytes": "71387"
},
{
"name": "JavaScript",
"bytes": "368548"
},
{
"name": "Python",
"bytes": "304660"
},
{
"name": "Shell",
"bytes": "7086"
}
],
"symlink_target": ""
} |
import json
import logging
import requests
from tornado.options import options
from model import TextMessage
class IFTTT(object):
def __init__(self, key):
self.key = key
self.logger = logging.getLogger("ifttt")
def url(self, event):
return 'https://maker.ifttt.com/trigger/{0}/with/key/{1}'.format(event, self.key)
def event(self, event, value1="", value2="", value3=""):
try:
requests.post(url=self.url(event),
headers={"Content-Type": "application/json; charset=utf-8"},
data=json.dumps({
"value3": value3,
"value2": value2,
"value1": value1
})
)
except requests.exceptions.RequestException:
self.logger.error("HTTP Request failed")
def sms_callback(self, sms):
self.event("sms_received", sms.number, sms.text, sms.time.strftime("%Y-%m-%d %H:%M:%S"))
def init_handles(gsm):
gsm.add_sms_callback(TextMessage.save_sms)
# ifttt
if options.ifttt_key:
ifttt = IFTTT(options.ifttt_key)
gsm.add_sms_callback(ifttt.sms_callback)
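# Hypothetical usage sketch (not part of the original handler): firing a Maker
# event by hand. The key and values below are placeholders; in the real flow
# IFTTT.sms_callback is registered by init_handles() and called by the GSM
# layer with a TextMessage-like object.
if __name__ == '__main__':
    ifttt = IFTTT("example-maker-key")
    ifttt.event("sms_received", "+15551234567", "hello", "2016-01-01 00:00:00")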
| {
"content_hash": "adc2995545c148976398f387e4a2b115",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 96,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.551219512195122,
"repo_name": "senghoo/operator",
"id": "8d9fe41dd4199677d4e09d913b930adc28715c32",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1558"
},
{
"name": "HTML",
"bytes": "3432"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "5883"
}
],
"symlink_target": ""
} |
import MySQLdb
import xlsxwriter
import mysql.connector
import datetime
import logging
import sys
from xml.dom import minidom
doc = minidom.parse("mykong.xml")
majors = doc.getElementsByTagName("major")
get_user = doc.getElementsByTagName("user")[0]
get_pwd = doc.getElementsByTagName("password")[0]
get_db = doc.getElementsByTagName("dbname")[0]
get_host = doc.getElementsByTagName("host")[0]
user = get_user.firstChild.data
pwd = get_pwd.firstChild.data
dbn = get_db.firstChild.data
host = get_host.firstChild.data
log = open("myprog.log", "a")
sys.stdout = log
db = mysql.connector.connect(user=user, password=pwd,
host=host,
database=dbn)
cursor = db.cursor()
for major in majors:
title = major.getElementsByTagName("title")[0]
heading = title.firstChild.data
sql = major.getElementsByTagName("sql")[0]
location = major.getElementsByTagName("location")[0]
loc = location.firstChild.data
query=sql.firstChild.data
try:
print query
cursor.execute(query)
result = cursor.fetchall()
num_fields = len(cursor.description)
field_names = [i[0] for i in cursor.description]
print result
workbook = xlsxwriter.Workbook(loc)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
date_format = workbook.add_format({'num_format': 'mmmm d yyyy'})
time_format = workbook.add_format({'num_format': 'hh:mm:ss'})
timestamp_format = workbook.add_format({'num_format': 'dd/mm/yy hh:mm:ss'})
format = workbook.add_format()
size = workbook.add_format()
align = workbook.add_format()
format.set_border()
date_format.set_border()
time_format.set_border()
timestamp_format.set_border()
align.set_border()
format.set_bg_color('cyan')
size.set_font_size(20)
align.set_align('left')
date_format.set_align('left')
time_format.set_align('left')
timestamp_format.set_align('left')
worksheet.write(0,0,heading,size)
worksheet.set_column(0,6,10)
format.set_bold()
row=1
col=0
j = 0
for rows in field_names:
worksheet.write(row,col,field_names[j],format)
col = col + 1
j = j + 1
n=0
for rows in result:
col=0
row = row + 1
for cols in rows:
if type(result[n][col]) is datetime.date:
worksheet.write(row,col,result[n][col],date_format)
elif type(result[n][col]) is datetime.timedelta:
worksheet.write(row,col,result[n][col],time_format)
elif type(result[n][col]) is datetime.datetime:
worksheet.write(row,col,result[n][col],timestamp_format)
else:
worksheet.write(row,col,result[n][col],align)
col = col + 1
n = n+1
except Exception as inst:
print "database & workbook is closing due to Exception"
workbook.close()
db.close()
print "database closed"
print "fine" | {
"content_hash": "285552da29ec2d3e885012f4a76da5e6",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 85,
"avg_line_length": 27.872,
"alnum_prop": 0.5473593570608496,
"repo_name": "atharva1996/squareinch",
"id": "9f96550cefc60a36c964405cc48701e99e9c4e8a",
"size": "3484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20623"
}
],
"symlink_target": ""
} |
import murano.dsl.murano_object as murano_object
class AttributeStore(object):
def __init__(self):
self._attributes = {}
def set(self, tagged_object, owner_type, name, value):
if isinstance(value, murano_object.MuranoObject):
value = value.object_id
key = (tagged_object.object_id, owner_type.name, name)
if value is None:
self._attributes.pop(key, None)
else:
self._attributes[key] = value
def get(self, tagged_object, owner_type, name):
return self._attributes.get(
(tagged_object.object_id, owner_type.name, name))
def serialize(self, known_objects):
return [
[key[0], key[1], key[2], value]
for key, value
in self._attributes.iteritems()
if key[0] in known_objects
]
def load(self, data):
for item in data:
if item[3] is not None:
self._attributes[(item[0], item[1], item[2])] = item[3]
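# Hypothetical usage sketch (not part of the original module): the store keys
# every attribute by (object_id, owner type name, attribute name). The fake
# type/object stand-ins below are assumptions purely for illustration.
if __name__ == '__main__':
    class _FakeType(object):
        name = 'io.murano.Example'

    class _FakeObject(object):
        object_id = 'obj-1'

    store = AttributeStore()
    store.set(_FakeObject(), _FakeType(), 'state', 'ready')
    print(store.get(_FakeObject(), _FakeType(), 'state'))  # -> ready
    print(store.serialize(['obj-1']))  # -> [['obj-1', 'io.murano.Example', 'state', 'ready']]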
| {
"content_hash": "9fedb8ca71fabee4b77be4186abb3d9e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.5650887573964497,
"repo_name": "chenyujie/hybrid-murano",
"id": "dd5038bde346f863f0ceb449056b33c6c453365d",
"size": "1628",
"binary": false,
"copies": "5",
"ref": "refs/heads/hybrid-master",
"path": "murano/dsl/attribute_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "1004440"
},
{
"name": "Shell",
"bytes": "6751"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
import sys
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.script import Script
reload(sys)
sys.setdefaultencoding('utf8')
config = Script.get_config()
zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'
RESULT_CODE_UNKNOWN = 'UNKNOWN'
def execute(configurations={}, parameters={}, host_name=None):
try:
pid_file = glob.glob(zeppelin_pid_dir + '/zeppelin-' + zeppelin_user + '-*.pid')[0]
check_process_status(pid_file)
except ComponentIsNotRunning as ex:
return (RESULT_CODE_CRITICAL, [str(ex)])
except:
return (RESULT_CODE_CRITICAL, ["Zeppelin is not running"])
return (RESULT_CODE_OK, ["Successful connection to Zeppelin"])
| {
"content_hash": "0e0f786386c0d0e3c6511589cc58c150",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 93,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.7672018348623854,
"repo_name": "arenadata/ambari",
"id": "cc51784ba8c6a5e363cffc83c81ee7d837b4d185",
"size": "1744",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""TraceEventImporter imports TraceEvent-formatted data
into the provided model.
This is a port of the trace event importer from
https://code.google.com/p/trace-viewer/
"""
from __future__ import division
from __future__ import absolute_import
import collections
import six
import telemetry.timeline.async_slice as tracing_async_slice
import telemetry.timeline.flow_event as tracing_flow_event
from telemetry.timeline import importer
from telemetry.timeline import memory_dump_event
from tracing.trace_data import trace_data as trace_data_module
# 2To3-division: lines like xxx / 1000.0 are unchanged, as the result is
# expected to be a float.
class TraceEventTimelineImporter(importer.TimelineImporter):
def __init__(self, model, trace_data):
super().__init__(model, trace_data)
self._trace_data = trace_data
self._all_async_events = []
self._all_object_events = []
self._all_flow_events = []
self._all_memory_dumps_by_dump_id = collections.defaultdict(list)
self._events = []
self._metadata = []
for trace in trace_data.GetTracesFor(trace_data_module.CHROME_TRACE_PART):
self._events.extend(trace['traceEvents'])
self.CollectMetadataRecords(trace)
def CollectMetadataRecords(self, trace):
part_field_names = {p.raw_field_name for p in
trace_data_module.ALL_TRACE_PARTS}
for k, v in six.iteritems(trace):
if k in part_field_names:
continue
self._metadata.append({'name': k, 'value': v})
@staticmethod
def GetSupportedPart():
return trace_data_module.CHROME_TRACE_PART
def _GetOrCreateProcess(self, pid):
return self._model.GetOrCreateProcess(pid)
def _ProcessAsyncEvent(self, event):
"""Helper to process an 'async finish' event, which will close an
open slice.
"""
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_async_events.append({
'event': event,
'thread': thread})
def _ProcessCounterEvent(self, event):
"""Helper that creates and adds samples to a Counter object based on
'C' phase events.
"""
if 'id' in event:
ctr_name = event['name'] + '[' + str(event['id']) + ']'
else:
ctr_name = event['name']
ctr = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateCounter(event['cat'], ctr_name))
# Initialize the counter's series fields if needed.
if len(ctr.series_names) == 0:
#TODO: implement counter object
for series_name in event['args']:
ctr.series_names.append(series_name)
if len(ctr.series_names) == 0:
self._model.import_errors.append(
'Expected counter ' + event['name'] +
' to have at least one argument to use as a value.')
# Drop the counter.
del ctr.parent.counters[ctr.full_name]
return
# Add the sample values.
ctr.timestamps.append(event['ts'] / 1000.0)
for series_name in ctr.series_names:
if series_name not in event['args']:
ctr.samples.append(0)
continue
ctr.samples.append(event['args'][series_name])
def _ProcessObjectEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_object_events.append({
'event': event,
'thread': thread})
def _ProcessDurationEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
self._model.import_errors.append(
'Timestamps are moving backward.')
return
if event['ph'] == 'B':
thread.BeginSlice(event['cat'],
event['name'],
event['ts'] / 1000.0,
event['tts'] / 1000.0 if 'tts' in event else None,
event['args'])
elif event['ph'] == 'E':
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
self._model.import_errors.append(
'Timestamps are moving backward.')
return
if not thread.open_slice_count:
self._model.import_errors.append(
'E phase event without a matching B phase event.')
return
new_slice = thread.EndSlice(
event['ts'] / 1000.0,
event['tts'] / 1000.0 if 'tts' in event else None)
for arg_name, arg_value in six.iteritems(event.get('args', {})):
if arg_name in new_slice.args:
self._model.import_errors.append(
'Both the B and E phases of ' + new_slice.name +
' provided values for argument ' + arg_name + '. ' +
'The value of the E phase event will be used.')
new_slice.args[arg_name] = arg_value
def _ProcessCompleteEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.PushCompleteSlice(
event['cat'],
event['name'],
event['ts'] / 1000.0,
event['dur'] / 1000.0 if 'dur' in event else None,
event['tts'] / 1000.0 if 'tts' in event else None,
event['tdur'] / 1000.0 if 'tdur' in event else None,
event['args'])
def _ProcessMarkEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.PushMarkSlice(
event['cat'],
event['name'],
event['ts'] / 1000.0,
event['tts'] / 1000.0 if 'tts' in event else None,
event['args'] if 'args' in event else None)
def _ProcessMetadataEvent(self, event):
if event['name'] == 'thread_name':
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.name = event['args']['name']
elif event['name'] == 'process_name':
process = self._GetOrCreateProcess(event['pid'])
process.name = event['args']['name']
elif event['name'] == 'process_labels':
process = self._GetOrCreateProcess(event['pid'])
process.labels = event['args']['labels']
elif event['name'] == 'process_uptime_seconds':
process = self._GetOrCreateProcess(event['pid'])
process.uptime_seconds = event['args']['uptime']
elif event['name'] == 'trace_buffer_overflowed':
process = self._GetOrCreateProcess(event['pid'])
process.SetTraceBufferOverflowTimestamp(event['args']['overflowed_at_ts'])
else:
self._model.import_errors.append(
'Unrecognized metadata name: ' + event['name'])
def _ProcessInstantEvent(self, event):
# Treat an Instant event as a duration 0 slice.
# SliceTrack's redraw() knows how to handle this.
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.BeginSlice(event['cat'],
event['name'],
event['ts'] / 1000.0,
args=event.get('args'))
thread.EndSlice(event['ts'] / 1000.0)
def _ProcessSampleEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.AddSample(event['cat'],
event['name'],
event['ts'] / 1000.0,
event.get('args'))
def _ProcessFlowEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_flow_events.append({
'event': event,
'thread': thread})
def _ProcessMemoryDumpEvents(self, events):
# Dictionary to order dumps by id and process.
global_dumps = {}
for event in events:
global_dump = global_dumps.setdefault(event['id'], {})
dump_events = global_dump.setdefault(event['pid'], [])
dump_events.append(event)
for dump_id, global_dump in six.iteritems(global_dumps):
for pid, dump_events in six.iteritems(global_dump):
process = self._GetOrCreateProcess(pid)
memory_dump = memory_dump_event.ProcessMemoryDumpEvent(process,
dump_events)
process.AddMemoryDumpEvent(memory_dump)
self._all_memory_dumps_by_dump_id[dump_id].append(memory_dump)
def ImportEvents(self):
"""Walks through the events_ list and outputs the structures discovered to
model_.
"""
for r in self._metadata:
self._model.metadata.append(r)
memory_dump_events = []
for event in self._events:
phase = event.get('ph', None)
if phase in ('B', 'E'):
self._ProcessDurationEvent(event)
elif phase == 'X':
self._ProcessCompleteEvent(event)
# Note, S, F, T are deprecated and replaced by 'b' and 'e'. For
# backwards compatibility continue to support them here.
elif phase in ('S', 'F', 'T'):
self._ProcessAsyncEvent(event)
elif phase in ('b', 'e'):
self._ProcessAsyncEvent(event)
elif phase == 'n':
self._ProcessAsyncEvent(event)
# Note, I is historic. The instant event marker got changed, but we
# want to support loading old trace files so we have both I and i.
elif phase in ('I', 'i'):
self._ProcessInstantEvent(event)
elif phase == 'P':
self._ProcessSampleEvent(event)
elif phase == 'C':
self._ProcessCounterEvent(event)
elif phase == 'M':
self._ProcessMetadataEvent(event)
elif phase in ('N', 'D', 'O'):
self._ProcessObjectEvent(event)
elif phase in ('s', 't', 'f'):
self._ProcessFlowEvent(event)
elif phase == 'v':
memory_dump_events.append(event)
elif phase == 'R':
self._ProcessMarkEvent(event)
else:
self._model.import_errors.append(
'Unrecognized event phase: ' + phase + '(' + event['name'] + ')')
# Memory dumps of a process with the same dump id need to be merged before
# processing. So, memory dump events are processed all at once.
self._ProcessMemoryDumpEvents(memory_dump_events)
return self._model
def FinalizeImport(self):
"""Called by the Model after all other importers have imported their
events."""
self._model.UpdateBounds()
# We need to reupdate the bounds in case the minimum start time changes
self._model.UpdateBounds()
self._CreateAsyncSlices()
self._CreateFlowSlices()
self._SetBrowserProcess()
self._SetGpuProcess()
self._SetSurfaceFlingerProcess()
self._CreateExplicitObjects()
self._CreateImplicitObjects()
self._CreateMemoryDumps()
def _CreateAsyncSlices(self):
if len(self._all_async_events) == 0:
return
self._all_async_events.sort(key=lambda x: x['event']['ts'])
async_event_states_by_name_then_id = {}
all_async_events = self._all_async_events
# pylint: disable=too-many-nested-blocks
for async_event_state in all_async_events:
event = async_event_state['event']
name = event.get('name', None)
if name is None:
self._model.import_errors.append(
'Async events (ph: b, e, n, S, T or F) require a name parameter.')
continue
if 'id2' in event:
if 'global' in event['id2']:
event_id = event['id2']['global']
else:
event_id = '%s.%s' % (event['pid'], event['id2']['local'])
else:
event_id = event.get('id')
if event_id is None:
self._model.import_errors.append(
'Async events (ph: b, e, n, S, T or F) require an id parameter.')
continue
# TODO(simonjam): Add a synchronous tick on the appropriate thread.
if event['ph'] == 'S' or event['ph'] == 'b':
if not name in async_event_states_by_name_then_id:
async_event_states_by_name_then_id[name] = {}
if event_id in async_event_states_by_name_then_id[name]:
self._model.import_errors.append(
'At %d, a slice of the same id %s was already open.' % (
event['ts'], event_id))
continue
async_event_states_by_name_then_id[name][event_id] = []
async_event_states_by_name_then_id[name][event_id].append(
async_event_state)
elif event['ph'] == 'n':
thread_start = event['tts'] / 1000.0 if 'tts' in event else None
async_slice = tracing_async_slice.AsyncSlice(
event['cat'], name, event['ts'] / 1000.0,
event['args'], 0, async_event_state['thread'],
async_event_state['thread'], thread_start
)
async_slice.id = event_id
async_slice.start_thread.AddAsyncSlice(async_slice)
else:
if name not in async_event_states_by_name_then_id:
self._model.import_errors.append(
'At %d, no slice named %s was open.' % (event['ts'], name,))
continue
if event_id not in async_event_states_by_name_then_id[name]:
self._model.import_errors.append(
'At %d, no slice named %s with id=%s was open.' % (
event['ts'], name, event_id))
continue
events = async_event_states_by_name_then_id[name][event_id]
events.append(async_event_state)
if event['ph'] == 'F' or event['ph'] == 'e':
# Create a slice from start to end.
async_slice = tracing_async_slice.AsyncSlice(
events[0]['event']['cat'],
name,
events[0]['event']['ts'] / 1000.0)
async_slice.duration = (
(event['ts'] / 1000.0) - (events[0]['event']['ts'] / 1000.0))
async_slice.start_thread = events[0]['thread']
async_slice.end_thread = async_event_state['thread']
if async_slice.start_thread == async_slice.end_thread:
if 'tts' in event and 'tts' in events[0]['event']:
async_slice.thread_start = events[0]['event']['tts'] / 1000.0
async_slice.thread_duration = (
(event['tts'] / 1000.0)
- (events[0]['event']['tts'] / 1000.0))
async_slice.id = event_id
async_slice.args = events[0]['event']['args']
# Create sub_slices for each step.
for j in range(1, len(events)):
sub_name = name
if events[j - 1]['event']['ph'] == 'T':
sub_name = name + ':' + events[j - 1]['event']['args']['step']
sub_slice = tracing_async_slice.AsyncSlice(
events[0]['event']['cat'],
sub_name,
events[j - 1]['event']['ts'] / 1000.0)
sub_slice.parent_slice = async_slice
sub_slice.duration = (
(events[j]['event']['ts'] / 1000.0)
- (events[j - 1]['event']['ts'] / 1000.0))
sub_slice.start_thread = events[j - 1]['thread']
sub_slice.end_thread = events[j]['thread']
if sub_slice.start_thread == sub_slice.end_thread:
if 'tts' in events[j]['event'] and \
'tts' in events[j - 1]['event']:
sub_slice.thread_duration = (
(events[j]['event']['tts'] / 1000.0)
- (events[j - 1]['event']['tts'] / 1000.0))
sub_slice.id = event_id
sub_slice.args = events[j - 1]['event']['args']
async_slice.AddSubSlice(sub_slice)
# The args for the finish event go in the last sub_slice.
last_slice = async_slice.sub_slices[-1]
for arg_name, arg_value in six.iteritems(event['args']):
last_slice.args[arg_name] = arg_value
# Add |async_slice| to the start-thread's async_slices.
async_slice.start_thread.AddAsyncSlice(async_slice)
del async_event_states_by_name_then_id[name][event_id]
def _CreateExplicitObjects(self):
# TODO(tengs): Implement object instance parsing
pass
def _CreateImplicitObjects(self):
# TODO(tengs): Implement object instance parsing
pass
def _CreateFlowSlices(self):
if len(self._all_flow_events) == 0:
return
self._all_flow_events.sort(key=lambda x: x['event']['ts'])
flow_id_to_event = {}
for data in self._all_flow_events:
event = data['event']
thread = data['thread']
if 'name' not in event:
self._model.import_errors.append(
'Flow events (ph: s, t or f) require a name parameter.')
continue
if 'id' not in event:
self._model.import_errors.append(
'Flow events (ph: s, t or f) require an id parameter.')
continue
flow_event = tracing_flow_event.FlowEvent(
event['cat'],
event['id'],
event['name'],
event['ts'] / 1000.0,
event['args'])
thread.AddFlowEvent(flow_event)
if event['ph'] == 's':
if event['id'] in flow_id_to_event:
self._model.import_errors.append(
'event id %s already seen when encountering start of '
'flow event.' % event['id'])
continue
flow_id_to_event[event['id']] = flow_event
elif event['ph'] == 't' or event['ph'] == 'f':
if not event['id'] in flow_id_to_event:
self._model.import_errors.append(
'Found flow phase %s for id: %s but no flow start found.' % (
event['ph'], event['id']))
continue
flow_position = flow_id_to_event[event['id']]
self._model.flow_events.append([flow_position, flow_event])
if event['ph'] == 'f':
del flow_id_to_event[event['id']]
else:
# Make this event the next start event in this flow.
flow_id_to_event[event['id']] = flow_event
def _CreateMemoryDumps(self):
self._model.SetGlobalMemoryDumps(
memory_dump_event.GlobalMemoryDump(events)
for events in six.itervalues(self._all_memory_dumps_by_dump_id))
def _SetBrowserProcess(self):
for thread in self._model.GetAllThreads():
if thread.name == 'CrBrowserMain':
self._model.browser_process = thread.parent
def _SetGpuProcess(self):
gpu_thread_names = [
'DrmThread', 'CrGpuMain', 'VizMain', 'VizCompositorThread']
for thread in self._model.GetAllThreads():
if thread.name in gpu_thread_names:
self._model.gpu_process = thread.parent
def _SetSurfaceFlingerProcess(self):
for process in self._model.GetAllProcesses():
if process.name == 'SurfaceFlinger':
self._model.surface_flinger_process = process
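# Illustrative sketch (not part of the importer above): the pairing logic of
# _CreateFlowSlices reduced to plain dictionaries. Flow events are matched
# purely by their 'id' field: ph 's' opens a flow, each 't' or 'f' is linked
# back to the most recent event seen for that id, and 'f' closes the flow.
# The event dicts here are hypothetical stand-ins for real trace events.
def _PairFlowEventsSketch(events):
  links = []       # (previous_event, current_event) pairs, in timestamp order
  open_flows = {}  # flow id -> most recent event seen for that flow
  for event in sorted(events, key=lambda e: e['ts']):
    if event['ph'] == 's':
      open_flows[event['id']] = event
    elif event['ph'] in ('t', 'f'):
      if event['id'] not in open_flows:
        continue  # no start seen; the importer records an import error here
      links.append((open_flows[event['id']], event))
      if event['ph'] == 'f':
        del open_flows[event['id']]
      else:
        open_flows[event['id']] = event
  return links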
| {
"content_hash": "7f58d927d2d3db135842d5bdd549dbcf",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 80,
"avg_line_length": 38.1881390593047,
"alnum_prop": 0.5883045946235408,
"repo_name": "catapult-project/catapult",
"id": "cdfae607c598bf908ceb3ed5eef92d2aa11d4c00",
"size": "18836",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/timeline/trace_event_importer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
"""Support for MyQ-Enabled lights."""
from pymyq.errors import MyQError
from homeassistant.components.light import LightEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import MyQEntity
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY, MYQ_TO_HASS
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up myq lights."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
async_add_entities(
[MyQLight(coordinator, device) for device in myq.lamps.values()], True
)
class MyQLight(MyQEntity, LightEntity):
"""Representation of a MyQ light."""
_attr_supported_features = 0
@property
def is_on(self):
"""Return true if the light is on, else False."""
return MYQ_TO_HASS.get(self._device.state) == STATE_ON
@property
def is_off(self):
"""Return true if the light is off, else False."""
return MYQ_TO_HASS.get(self._device.state) == STATE_OFF
async def async_turn_on(self, **kwargs):
"""Issue on command to light."""
if self.is_on:
return
try:
await self._device.turnon(wait_for_state=True)
except MyQError as err:
raise HomeAssistantError(
f"Turning light {self._device.name} on failed with error: {err}"
) from err
# Write new state to HASS
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Issue off command to light."""
if self.is_off:
return
try:
await self._device.turnoff(wait_for_state=True)
except MyQError as err:
raise HomeAssistantError(
f"Turning light {self._device.name} off failed with error: {err}"
) from err
# Write new state to HASS
self.async_write_ha_state()
| {
"content_hash": "122f3f1f6effd7d569ea0d08d7002b2d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 30.589041095890412,
"alnum_prop": 0.6466636811464398,
"repo_name": "rohitranjan1991/home-assistant",
"id": "d3f6c6de36bdf42d71a7eb0872fc644f2e4d7ae2",
"size": "2233",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/myq/light.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
from xierpa3.adapters.adapter import Adapter
#from xierpa3.toolbox.database.sql.sqldatastore import SqlDatastore
#from xierpa3.toolbox.database.base.defaultmodels import Item
#from xierpa3.toolbox.database.base.selector import Selector
class Selector():
# @@@ Under development
pass
class SqlDatastore():
# @@@ Under development
pass
class SQLAdapter(Adapter):
# @@@ Under development
SQL_NOTDELETED = 'deleted is not TRUE'
def __init__(self, database_name, model=None):
Adapter.__init__(self)
self._db = self.openDatabase(database_name, model)
def getModel(self):
u"""
        The model of the database used for the Python representation and operation of the data set. By default this represents the Item table only.
"""
#return {'item': Item}
def openDatabase(self, database_name, model):
return SqlDatastore(database_name, model)
def getRecord(self, table='item', id=None, readonly=True, data=None, fields=None, **args):
u"""
The ``getRecord`` method answers the result of ``self._db.getRecord``. If ``id`` is
        not defined, then a new record is created, using the optional ``args`` as values to initialize. If the record
        cannot be found, then an instance of ``NoneRecord`` is answered.
"""
return self._db.getRecord(table=table, id=id, readonly=readonly,
fields=fields or self._db.getFieldNames(table), data=data, **args)
def getSelection(self, table=None, selector=None, where=None, order=None, slice=None, start=None,
parentid=None, readonly=True, query=None, fields=None, deleted=False, andor='AND',
selectall=False, *args, **kwargs):
u"""
The ``getSelection`` method is a front for ``self._db.getSelection()``. The difference is that
        it allows three ways to define the selection clause: ``**args`` is the dictionary of additional
        attributes that will be composed into an AND clause ``where``, or the ``selector`` instance or
        the raw ``where``. If one of these attributes is defined, then the values of the preceding ones are
        ignored.<br/>
"""
if not query:
table = table or 'item'
if kwargs:
selector = Selector(kwargs, andor=andor)
if not deleted:
if not selector and selectall:
selector = Selector()
if selector:
selector = self.addNotDeleted(table, selector)
if selector:
where = selector.getClause()
if self._db is None:
return self.getNoneRecord(table)
if not fields:
fields = self._db.getFieldNames(table)
return self._db.getSelection(table=table, fields=fields, where=where,
order=order, slice=slice, start=start, readonly=readonly, query=query)
def getEmptySelection(self, table='item', fields=None, **args):
if not fields:
fields = self._db.getFieldNames(table)
return self._db.getEmptySelection(table=table, fields=fields, **args)
if __name__ == "__main__":
pass
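# Usage sketch (comments only; Selector and SqlDatastore are still stub
# classes above, so none of this runs yet). The adapter is meant to accept the
# selection clause in three equivalent forms:
#   adapter = SQLAdapter('mydb')
#   adapter.getSelection(table='item', name='home')           # keyword args composed into a Selector
#   adapter.getSelection(table='item', selector=mySelector)   # an explicit Selector instance
#   adapter.getSelection(table='item', where="name='home'")   # a raw SQL where clause
# where 'mydb', 'home' and mySelector are hypothetical placeholders.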
| {
"content_hash": "1e202811f14b827a8044f23e44a73998",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 120,
"avg_line_length": 38.901234567901234,
"alnum_prop": 0.6296413836877182,
"repo_name": "petrvanblokland/Xierpa3",
"id": "46abb92ebbada68a6886ace6ab44f1c9313935ed",
"size": "3508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xierpa3/adapters/sqladapter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41394"
},
{
"name": "JavaScript",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "1349828"
}
],
"symlink_target": ""
} |
'a string contains extra whitespaces'
| {
"content_hash": "eb5ebd2d896f3b71af02210f1c1adfbe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.8157894736842105,
"repo_name": "ssato/python-anyconfig",
"id": "22f1dd18d845593cef968fa0837413bfd8c5e75c",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "tests/res/parser/single/10/e/70.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "568"
},
{
"name": "Python",
"bytes": "348779"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
} |
import argparse
import datetime
from distroinfo import info
from distroinfo import query
from rdoutils import review_utils
from rdoutils import releases_utils
rdoinfo_repo = ('https://raw.githubusercontent.com/'
'redhat-openstack/rdoinfo/master/')
def parse_args():
parser = argparse.ArgumentParser(description='List new releases tagged in '
'OpenStack projects managed by release '
'project')
parser.add_argument('-r', '--release', dest='release',
default='ocata',
                        help='Release to list new releases for')
parser.add_argument('-d', '--days', dest='days', default=2, type=int,
help='Number of days to list new releases')
parser.add_argument('-n', '--review-number', dest='number', default=None,
help='Review number')
return parser.parse_args()
def format_time(time):
tformat = '%Y-%m-%d %H:%M:%S.%f000'
return datetime.datetime.strptime(time, tformat)
def main():
args = parse_args()
if args.number:
after_fmt = None
else:
after = datetime.datetime.now() - datetime.timedelta(days=args.days)
after_fmt = after.strftime('%Y-%m-%d')
reviews = review_utils.get_osp_releases_reviews(args.release,
after=after_fmt,
number=args.number,
status='merged')
distroinfo = info.DistroInfo(
info_files='rdo.yml',
remote_info=rdoinfo_repo)
inforepo = distroinfo.get_info()
for review in reviews:
submitted = format_time(review['submitted'])
review_number = review['_number']
releases = releases_utils.get_new_releases_review(review)
for release in releases:
for repo in release['repos']:
pkg = query.find_package(inforepo, repo, strict=True)
if pkg:
name = pkg['name']
else:
name = repo
print("%s %s %s %s" % (review_number, submitted,
release['version'], name))
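# Output sketch: main() prints one line per released repo in the form
#   <review_number> <submitted_timestamp> <version> <package_or_repo_name>
# This module lives under rdoutils/cmd/, so main() is presumably wired up as a
# console-script entry point rather than invoked through __main__ (assumption).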
| {
"content_hash": "1fe99da0cc44641e01744d8335de9b27",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 38.59322033898305,
"alnum_prop": 0.5344751866490997,
"repo_name": "rdo-infra/releng",
"id": "7a83c4a6d598bd032860996ffc4c14ff09b071b5",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdoutils/cmd/new_releases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "76858"
},
{
"name": "Shell",
"bytes": "24314"
}
],
"symlink_target": ""
} |
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MPFI.jl'
copyright = u'2013, Alessandro Andrioni'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MPFIjldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MPFIjl.tex', u'MPFI.jl Documentation',
u'Alessandro Andrioni', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'MPFIjl', u'MPFI.jl Documentation',
[u'Alessandro Andrioni'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MPFIjl', u'MPFI.jl Documentation',
u'Alessandro Andrioni', 'MPFIjl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "e63104e3007979a3ed027c7e45606e90",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.222707423580786,
"alnum_prop": 0.7017211004201112,
"repo_name": "JuliaPackageMirrors/MPFI.jl",
"id": "f2a493ca70377dff38841613d46fa8f36af5e9be",
"size": "7797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "20303"
}
],
"symlink_target": ""
} |
"""
Concurrent-learning controller derived for
a two-linkage robotic manipulator. Includes
repetitive learning if the path to track
is cyclical.
"""
################################################# DEPENDENCIES
from __future__ import division
import numpy as np
import numpy.linalg as npl
from collections import deque
from cmath import sqrt
################################################# PRIMARY CLASS
class Controller:
def __init__(self, dt, q0, target, path_type,
kp, kd, kg, ku, kf,
umax, vmax, amax,
history_size, filter_window, adapt0):
"""
Set-up. Takes call-period, initial state, target pose, path type,
gains, integral/filter window size, effort limit, maximum speed
and acceleration, history stack size, selection type, and initial condition.
"""
self.ncontrols = len(umax)
self.nparams = len(adapt0)
if filter_window and np.isfinite(filter_window):
self.nfilt = int(filter_window / dt)
else:
self.nfilt = 0
self.set_gains(kp, kd, kg, ku, kf)
self.set_limits(umax, vmax, amax)
self.adapt = adapt0
self.adapt_err = np.zeros(self.nparams)
self.Lest = np.zeros(2)
self.mest = np.zeros(2)
self.gest = 0
self.uf = np.zeros(self.ncontrols)
self.Yuf = np.zeros((self.ncontrols, self.nparams))
self.Yuf1 = np.zeros((self.ncontrols, self.nparams))
self.uf_stack = deque([self.uf] * self.nfilt)
self.Yuf1_stack = deque([self.Yuf1] * self.nfilt)
self.q0 = q0
self.q_stack = deque([q0] * self.nfilt)
self.history_stack = deque([self.make_history_pair(self.Yuf, self.uf)] * history_size)
self.history_size = history_size
self.history_eig = 0
self.YY_stack = deque([np.zeros((self.nparams, self.nparams))] * history_size)
self.YY_sum = np.zeros((self.nparams, self.nparams))
self.time = 0
self.set_path(q0, target, path_type, dt)
self.rep = np.zeros(self.ncontrols)
self.rep_T = np.zeros(self.ncontrols)
self.rep_stack = deque([self.rep] * self.ncycle)
self.kill = False
########################
def set_gains(self, kp, kd, kg, ku, kf):
"""
Sets proportional, derivative, adaptive, and filter gains.
"""
self.kp = np.array(kp, dtype=np.float32)
self.kd = np.array(kd, dtype=np.float32)
self.kr = self.kp / self.kd
if type(kg) is str:
if kg == 'LS':
self.kg = 100*np.eye(self.nparams)
self.use_LS = True
else:
raise ValueError("Did you mean kg = 'LS' (least squares)?")
else:
self.kg = np.diag(kg)
self.use_LS = False
self.ku = np.diag(ku)
self.kf = np.array(kf, dtype=np.float32)
########################
def set_limits(self, umax, vmax, amax):
"""
Sets model limits.
Uses the limits to compute a model reference for tracking,
and uses repmax for limiting repetitive learning.
"""
self.umax = np.array(umax, dtype=np.float32)
self.vmax = np.array(vmax, dtype=np.float32)
self.amax = np.array(amax, dtype=np.float32)
self.saturated = False
if np.inf in self.umax or 0 in self.umax:
self.umaxref = np.array([250, 30], dtype=np.float32)
else:
self.umaxref = self.umax
self.dref = self.umaxref / self.vmax
if np.inf in self.amax:
self.mref = np.array([0.01, 0.01], dtype=np.float32)
else:
self.mref = self.umaxref / self.amax
self.repmax = np.array([15, 15])
########################
def set_path(self, q0, target, path_type, dt):
"""
Resets controller time and reference acceleration.
Sets the path initial state, the target position, and the
type of path. Updates reference q to its initial t=0 value.
If the path will be cyclic, repetitive learning is enabled.
The path cycle period is hardcoded in.
"""
self.path_time = 0
self.qref = np.array(q0)
self.aref = np.zeros(self.ncontrols)
self.path_type = path_type
if path_type == 'train':
self.target = 2*np.pi*(np.random.rand(2) - 0.5)
else:
self.target = np.array(target)
if path_type == 'cycle':
self.use_RL = True
else:
self.use_RL = False
self.Tcycle = 5 # s
self.ncycle = int(2 * self.Tcycle / dt)
self.update_ref(0)
########################
def get_effort(self, q, dt):
"""
Returns the vector of torques as a PD controller plus
a feedforward term that uses an estimate of the system's
physical parameters. The output is saturated at umax as
specified by the user previously. Before returning the
torques, the latest parameter estimate is also updated.
"""
# Tracking errors
E = self.qref[:2] - q[:2]
Edot = self.qref[2:] - q[2:]
tracking_err = self.kr*E + Edot
# Tracking regressor
Y = np.array([
[np.cos(q[0]),
self.aref[0] - self.kr[0]*q[2] + self.kr[0]*self.qref[2],
np.cos(q[0] + q[1]),
np.cos(q[1])*(2*self.aref[0] + self.aref[1] - 2*self.kr[0]*q[2] - self.kr[1]*q[3] + 2*self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]) - q[3]*np.sin(q[1])*(2*q[2] + q[3]),
self.aref[0] + self.aref[1] - self.kr[0]*q[2] - self.kr[1]*q[3] + self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]],
[0,
0,
np.cos(q[0] + q[1]),
q[2]**2*np.sin(q[1]) + np.cos(q[1])*(self.aref[0] - self.kr[0]*q[2] + self.kr[0]*self.qref[2]),
self.aref[0] + self.aref[1] - self.kr[0]*q[2] - self.kr[1]*q[3] + self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]]
])
# Control law
u = self.kp*E + self.kd*Edot + Y.dot(self.adapt) + self.rep
# Learning gradient gain
if self.use_LS:
# Approximate least-squares gain choice
self.kg = self.kg - (self.kg.dot(self.ku.dot(self.Yuf.T.dot(self.Yuf))).dot(self.kg))*dt
# Update adaptation
self.adapt = self.adapt + self.kg.dot(Y.T.dot(tracking_err) + self.ku.dot(self.adapt_err))*dt
if self.use_RL:
self.rep = np.clip(self.rep_T, -self.repmax, self.repmax) + self.kd*tracking_err
self.rep_stack.append(self.rep)
self.rep_T = self.rep_stack.popleft()
# Update filtered prediction regressor, filtered control effort, and learning history stack
self.update_learning(q, u, dt)
# Update reference trajectory and controller life time
self.update_ref(dt)
self.time = self.time + dt
# Safety saturation of output
self.saturated = False
for i, mag in enumerate(abs(u)):
if mag > self.umax[i]:
u[i] = self.umax[i] * np.sign(u[i])
self.saturated = True
# Return effort torques
return u
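    # In compact form, the law computed by get_effort above is (restated as a
    # comment, not new behaviour):
    #   u       = kp*E + kd*Edot + Y(q, qref, aref).dot(adapt) + rep
    #   adapt  += kg.dot(Y.T.dot(kr*E + Edot) + ku.dot(adapt_err)) * dt
    # where adapt_err is the concurrent-learning correction assembled in
    # update_learning from the recorded (Yi, ui) history pairs.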
########################
def update_learning(self, q, u, dt):
"""
Concurrent-learning plus (if applicable) repetitive learning.
http://arxiv.org/pdf/1507.08903.pdf
http://www.me.berkeley.edu/~horowitz/Publications_files/Papers_numbered/Journal/24j_Kaneko_repetitive_manipulators_IEEE_TRA97.pdf
"""
# Instantaneous parts of filtered prediction regressor
Yuf2_now = np.array([
[0, q[2], 0, np.cos(q[1])*(2*q[2] + q[3]), q[2] + q[3]],
[0, 0, 0, q[2]*np.cos(q[1]), q[2] + q[3]]
])
Yuf2_then = np.array([
[0, self.q0[2], 0, np.cos(self.q0[1])*(2*self.q0[2] + self.q0[3]), self.q0[2] + self.q0[3]],
[0, 0, 0, self.q0[2]*np.cos(self.q0[1]), self.q0[2] + self.q0[3]]
])
Yuf2 = Yuf2_now - Yuf2_then
# Convolutional filtering of prediction regressor and control effort...
if self.kf:
self.Yuf = self.kf*(self.Yuf1 + Yuf2)
Yuf1dot = np.array([
[np.cos(q[0]), -self.kf*q[2], np.cos(q[0] + q[1]), -self.kf*np.cos(q[1])*(2*q[2] + q[3]), -self.kf*(q[2] + q[3])],
[0, 0, np.cos(q[0] + q[1]), q[2]*((q[2] + q[3])*np.sin(q[1]) - self.kf*np.cos(q[1])), -self.kf*(q[2] + q[3])]
])
# infinite window continuous sum...
if not self.nfilt:
self.uf = self.uf + self.kf*(u - self.uf)*dt
self.Yuf1 = self.Yuf1 + (Yuf1dot - self.kf*self.Yuf1)*dt
# ...or finite window push pop
else:
self.uf_stack.append(self.kf*(u - self.uf)*dt)
self.uf = (self.uf - self.uf_stack.popleft()) + self.uf_stack[-1]
self.Yuf1_stack.append((Yuf1dot - self.kf*self.Yuf1)*dt)
self.Yuf1 = (self.Yuf1 - self.Yuf1_stack.popleft()) + self.Yuf1_stack[-1]
self.q_stack.append(q)
self.q0 = self.q_stack.popleft()
# ...or integral filtering of prediction regressor and control effort if kf = 0
else:
self.Yuf = self.Yuf1 + Yuf2
Yuf1dot = np.array([
[np.cos(q[0]), 0, np.cos(q[0] + q[1]), 0, 0],
[0, 0, np.cos(q[0] + q[1]), q[2]*(q[2] + q[3])*np.sin(q[1]), 0]
])
# infinite window continuous sum...
if not self.nfilt:
self.uf = self.uf + u*dt
self.Yuf1 = self.Yuf1 + Yuf1dot*dt
# ...or finite window push pop
else:
self.uf_stack.append(u*dt)
self.uf = (self.uf - self.uf_stack.popleft()) + self.uf_stack[-1]
self.Yuf1_stack.append(Yuf1dot*dt)
self.Yuf1 = (self.Yuf1 - self.Yuf1_stack.popleft()) + self.Yuf1_stack[-1]
self.q_stack.append(q)
self.q0 = self.q_stack.popleft()
# If stack size is > 0 then use selective learning...
if self.history_size:
# Candidate data point
new_data = self.make_history_pair(self.Yuf, self.uf)
new_YY = self.Yuf.T.dot(self.Yuf)
# If buffer is full...
if self.time > dt*self.history_size:
# Space for storing minimum eigenvalues during new data point testing
eig_mins = np.zeros(self.history_size)
# YY_sum if we add new data but don't remove any
extended_sum = self.YY_sum + new_YY
# Test all possible insertions of the new data
for i in xrange(self.history_size):
candidate_sum = extended_sum - self.YY_stack[i]
try:
assert np.isfinite(candidate_sum[0, 0])
eig_mins[i] = npl.eigvalsh(candidate_sum)[0]
except (npl.LinAlgError, AssertionError):
print("ADAPTATION UNSTABLE: try a smaller kg (or pick kg='LS'), or try a smaller stack_size.")
self.kill = True
return 0
# Take best possible insertion if it raises the minimum eigenvalue of our current stack
hotseat = np.argmax(eig_mins)
if eig_mins[hotseat] > self.history_eig and not self.saturated:
# Print if wisdom has increased significantly
if eig_mins[hotseat] - self.history_eig > 0.001:
print('Significant: {} @ time: {}'.format(np.round(self.history_eig*100, 1), self.time))
# Update history
self.history_stack[hotseat] = new_data
self.history_eig = eig_mins[hotseat]
self.YY_sum = extended_sum - self.YY_stack[hotseat]
self.YY_stack[hotseat] = new_YY
# ...until then just learn regardless
else:
self.history_stack.append(new_data)
self.history_stack.popleft()
self.YY_stack.append(new_YY)
self.YY_sum = (self.YY_sum - self.YY_stack.popleft()) + new_YY
print('Buffering @ time: {}'.format(self.time))
# Update estimated adaptation error
self.adapt_err = np.zeros(self.nparams)
for i, pair in enumerate(self.history_stack):
self.adapt_err = self.adapt_err + pair['Yi'].T.dot(pair['ui'] - pair['Yi'].dot(self.adapt))
# ...otherwise just use newest data point ("composite adaptation")
else:
self.adapt_err = self.Yuf.T.dot(self.uf - self.Yuf.dot(self.adapt))
# Solve for system parameters using dynamic parameter estimates, taking a great guess at g
if all(np.around(abs(self.adapt), 2)):
self.Lest = 9.81 * abs(np.array([self.adapt[1] / self.adapt[0], self.adapt[4] / self.adapt[2]]))
self.mest[1] = abs(self.adapt[4] / self.Lest[1]**2)
self.mest[0] = abs((self.adapt[1] / self.Lest[0]**2) - self.mest[1])
########################
def make_history_pair(self, Yi, ui):
"""
Creates a history pair as a dictionary containing keys 'Yi' and 'ui',
which are the filtered regressor and filtered effort for that instant.
"""
return {'Yi': Yi, 'ui': ui}
########################
def update_ref(self, dt):
"""
Updates the reference state qref depending on the
settings created in set_path. In every case, a
spring-damper tuned to vmax and amax is used to
generate the profile between each discontinuous target.
'train': sequence of random joint-space configurations
'waypoint': a single end-effector-space waypoint
'random': sequence of random 'waypoint's
'cycle': switching between two 'waypoint's at Tcycle time
"""
self.path_time = self.path_time + dt
if self.path_type == 'train':
Eref = self.target[:2] - self.qref[:2]
Erefdot = -self.qref[2:]
uref = self.kp*Eref + self.kd*Erefdot
self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt
if self.path_time > self.Tcycle:
self.set_path(self.qref, 2*np.pi*(np.random.rand(2) - 0.5), 'train', dt)
elif self.path_type in ['waypoint', 'random']:
target_q = self.kinem_reverse(np.concatenate((self.target, [0, 0])), self.qref)[:2]
Eref = target_q[:2] - self.qref[:2]
Erefdot = -self.qref[2:]
uref = self.kp*Eref + self.kd*Erefdot
self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt
if self.path_type == 'random' and self.path_time > self.Tcycle:
searching = True
while searching:
target = sum(self.Lest)*(np.random.rand(2) - 0.5)
if (all(np.around(abs(self.Lest), 5)) and
abs((npl.norm(target)**2 - self.Lest[0]**2 - self.Lest[1]**2) / (2*self.Lest[0]*self.Lest[1])) <= 1 and
npl.norm(target - self.target) > 1):
searching = False
self.set_path(self.qref, target, 'random', dt)
elif self.path_type == 'cycle':
Eref = self.target[:2] - self.qref[:2]
Erefdot = -self.qref[2:]
uref = self.kp*Eref + self.kd*Erefdot
self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt
if self.path_time > self.Tcycle:
self.set_path(self.qref, -self.target, 'cycle', dt)
else:
raise ValueError("Invalid path_type.")
########################
def reference_dynamics(self, qref, uref):
"""
Computes reference state derivative (qrefdot).
Takes reference state (qref) and reference control input (uref).
Spring-damper model tuned to vmax (terminal velocity) and amax (saturation).
"""
# Imposed actuator saturation
for i, mag in enumerate(abs(uref)):
if mag > self.umaxref[i]:
uref[i] = self.umaxref[i] * np.sign(uref[i])
# Simple linear evolution
return np.concatenate((qref[2:] , (uref - self.dref*qref[2:]) / self.mref))
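    # Written out, the reference model integrated above is
    #   mref * qref_ddot = uref - dref * qref_dot
    # with dref = umaxref / vmax and mref = umaxref / amax (see set_limits), so
    # the terminal velocity under a saturated uref is vmax and the acceleration
    # from rest is capped near amax.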
########################
def kinem_reverse(self, x, qlast=None):
"""
Given some end effector state x, solves for the corresponding joint state q.
Optionally uses the last joint state qlast to decide on the closest new q solution.
"""
if all(np.around(abs(self.Lest), 5)):
c2 = (npl.norm(x[:2])**2 - self.Lest[0]**2 - self.Lest[1]**2) / (2*self.Lest[0]*self.Lest[1])
else:
c2 = (npl.norm(x[:2])**2 - 2) / 2
s2a = np.real(sqrt(1 - c2**2))
s2b = -s2a
Jp = np.array([[self.Lest[0] + self.Lest[1]*c2, -self.Lest[1]*s2a],
[self.Lest[1]*s2a, self.Lest[0] + self.Lest[1]*c2]
])
if abs(c2) > 1 or np.isclose(npl.det(Jp), 0):
ta = 2*np.pi*(np.random.rand(2)-0.5)
tb = 2*np.pi*(np.random.rand(2)-0.5)
else:
c1a, s1a = npl.inv(Jp).dot(x[:2])
c1b, s1b = npl.inv(Jp.T).dot(x[:2])
ta = np.array([np.arctan2(s1a, c1a), np.arctan2(s2a, c2)])
tb = np.array([np.arctan2(s1b, c1b), np.arctan2(s2b, c2)])
if qlast is None or npl.norm(ta-qlast[:2]) < npl.norm(tb-qlast[:2]):
t = ta
else:
t = tb
Jv = np.array([[-(self.Lest[0]*np.sin(t[0]) + self.Lest[1]*np.sin(t[0]+t[1])), -self.Lest[1]*np.sin(t[0]+t[1])],
[self.Lest[0]*np.cos(t[0]) + self.Lest[1]*np.cos(t[0]+t[1]), self.Lest[1]*np.cos(t[0]+t[1])]
])
if np.isclose(npl.det(Jv), 0):
w = np.zeros(2)
else:
w = npl.inv(Jv).dot(x[2:])
return np.concatenate((t, w))
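################################################# APPENDIX SKETCH
# Standalone sketch (not used by Controller) of the history-stack selection
# rule in update_learning: a candidate (Y, u) pair replaces a stored one only
# if doing so raises the minimum eigenvalue of sum_i Yi^T.dot(Yi), i.e. only
# if it adds information to the stack. Arguments mirror the attributes of the
# same names in the class above; np and npl are the module-level imports.
def select_history_slot(YY_stack, YY_sum, new_YY, current_min_eig):
    extended_sum = YY_sum + new_YY
    eig_mins = [npl.eigvalsh(extended_sum - YY_i)[0] for YY_i in YY_stack]
    best = int(np.argmax(eig_mins))
    if eig_mins[best] > current_min_eig:
        # Replace slot `best`; the new minimum eigenvalue is eig_mins[best].
        return best, eig_mins[best]
    return None, current_min_eig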
| {
"content_hash": "bf29819252db9539b235014f1dbced5f",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 190,
"avg_line_length": 33.66737739872068,
"alnum_prop": 0.607726409119696,
"repo_name": "jnez71/adaptive_control",
"id": "7826ab4385b3d9e837446af882716b297a4b1aea",
"size": "15790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2link/control_2link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "35900"
},
{
"name": "Python",
"bytes": "76331"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
import time
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 10, "The batch size to train")
flags.DEFINE_integer("epoch_number", 10, "Number of epochs to run trainer")
flags.DEFINE_integer("steps_to_validate", 1,
"Steps to validate and print loss")
flags.DEFINE_string("checkpoint_dir", "./checkpoint/",
"indicates the checkpoint dirctory")
flags.DEFINE_string("model_path", "./model/", "The export path of the model")
flags.DEFINE_integer("export_version", 1, "The version number of the model")
def main():
# Define training data
x = np.ones(FLAGS.batch_size)
y = np.ones(FLAGS.batch_size)
# Define the model
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
w = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
loss = tf.square(Y - tf.mul(X, w) - b)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
predict_op = tf.mul(X, w) + b
saver = tf.train.Saver()
checkpoint_dir = FLAGS.checkpoint_dir
checkpoint_file = checkpoint_dir + "/checkpoint.ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Start the session
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print("Continue training from the model {}".format(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
# Start training
start_time = time.time()
for epoch in range(FLAGS.epoch_number):
sess.run(train_op, feed_dict={X: x, Y: y})
# Start validating
if epoch % FLAGS.steps_to_validate == 0:
end_time = time.time()
print("[{}] Epoch: {}".format(end_time - start_time, epoch))
saver.save(sess, checkpoint_file)
start_time = end_time
# Print model variables
w_value, b_value = sess.run([w, b])
print("The model of w: {}, b: {}".format(w_value, b_value))
# Export the model
print("Exporting trained model to {}".format(FLAGS.model_path))
model_exporter = exporter.Exporter(saver)
model_exporter.init(
sess.graph.as_graph_def(),
named_graph_signatures={
'inputs': exporter.generic_signature({"features": X}),
'outputs': exporter.generic_signature({"prediction": predict_op})
})
model_exporter.export(FLAGS.model_path, tf.constant(FLAGS.export_version), sess)
    print("Done exporting!")
if __name__ == "__main__":
main()
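# Serving sketch (assumption, not part of this script): the generic signatures
# exported above mean a serving client sends its input tensor under the key
# "features" and reads the result under the key "prediction". For the linear
# model w*x + b trained here on (x, y) = (1, 1), a request with
# features = [1.0] should return a prediction close to 1.0 once converged.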
| {
"content_hash": "8a2e2cfc1a82658f1ffe859db220a339",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 85,
"avg_line_length": 34.90909090909091,
"alnum_prop": 0.6618303571428571,
"repo_name": "tobegit3hub/deep_recommend_system",
"id": "5094a741ad4b7747c9d56467cc3af05cf2fae3c2",
"size": "2711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minimal_model/train.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6416"
},
{
"name": "Go",
"bytes": "6298"
},
{
"name": "Java",
"bytes": "15516"
},
{
"name": "Protocol Buffer",
"bytes": "561281"
},
{
"name": "Python",
"bytes": "96803"
},
{
"name": "Scala",
"bytes": "6980"
},
{
"name": "Shell",
"bytes": "1464"
}
],
"symlink_target": ""
} |
import requests
from urllib.parse import urlencode, urljoin
from collections import namedtuple
from .exceptions import BankIdError
Token = namedtuple('Token', ['access_token', 'refresh_token', 'expires_in', 'token_type'])
class BaseBankIdClient(object):
"""
Base BankID client class, which contains common functionality for all BankID providers.
Constructor expects a `client_id` and `client_secret` for all providers. Some providers may introduce
    additional arguments. Optionally, the following keyword arguments may be provided:
authorization_base_url: URL to be used for authorization grant step
api_base_url: Base URL for the API requests not related to authorization step
Public methods may raise `BankIdError` exception for specific BankID errors.
"""
default_authorization_base_url = NotImplemented
default_api_base_url = NotImplemented
token_endpoint = NotImplemented
def __init__(self, client_id, client_secret, **kwargs):
self.client_id = client_id
self.client_secret = client_secret
self.authorization_base_url = kwargs.get('authorization_base_url', self.default_authorization_base_url)
self.api_base_url = kwargs.get('api_base_url', self.default_api_base_url)
def authorization_url(self, redirect_url):
"""
Returns authorization grant step URL with `redirect_url` encoded into it to be returned to after auth.
"""
args = (
('client_id', self.client_id),
('redirect_uri', redirect_url),
('response_type', 'code'),
)
return '{}?{}'.format(self.authorization_base_url, urlencode(args))
def retrieve_access_token(self, code, redirect_url):
"""
Returns `Token` instance with OAuth2 access and refresh tokens obtained by the given `code`.
`redirect_url` MUST match the one used for `authorization_url`.
"""
response = requests.post(urljoin(self.api_base_url, self.token_endpoint), data={
'code': code,
'client_id': self.client_id,
'client_secret': self._client_secret(code),
'redirect_uri': redirect_url,
'grant_type': 'authorization_code'
})
if response.status_code == requests.codes.ok:
data = response.json()
return Token(data['access_token'], data['refresh_token'], data['expires_in'], data['token_type'])
else:
self._handle_errors(response)
def refresh_access_token(self, token):
"""
Refreshes the OAuth2 access token and returns a new one.
        Note: this can be used only once per token; the refreshed token does not contain a `refresh_token` anymore.
"""
assert token.refresh_token is not None
response = requests.post(urljoin(self.api_base_url, self.token_endpoint), data={
'client_id': self.client_id,
'client_secret': self._client_secret(token.refresh_token),
'refresh_token': token.refresh_token,
'grant_type': 'refresh_token'
})
if response.status_code == requests.codes.ok:
data = response.json()
return Token(data['access_token'], None, data['expires_in'], data['token_type'])
else:
self._handle_errors(response)
def user_info(self, token, declaration):
"""
Calls the BankID provider to obtain the user information by given `token` and fields `declaration`.
"""
raise NotImplementedError
def _client_secret(self, code):
return self.client_secret
def _handle_errors(self, response):
try:
error = response.json()
raise BankIdError(error.get('error'), error.get('error_description'))
except ValueError:
raise BankIdError(code=response.status_code, description='Unknown error')
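# Minimal sketch of a concrete provider (illustrative only; the endpoint URLs
# and class name below are hypothetical). Real providers supply the three
# class attributes left as NotImplemented above plus a `user_info` request.
class _ExampleBankIdClient(BaseBankIdClient):
    default_authorization_base_url = 'https://bankid.example.com/oauth/authorize'
    default_api_base_url = 'https://bankid.example.com/'
    token_endpoint = 'oauth/token'
    def user_info(self, token, declaration):
        raise NotImplementedError  # provider-specific API call goes here
# Typical flow against such a subclass (sketch):
#   client = _ExampleBankIdClient('my-client-id', 'my-client-secret')
#   url = client.authorization_url('https://myapp.example.com/callback')
#   ... the user authorizes and is redirected back with ?code=<code> ...
#   token = client.retrieve_access_token(code, 'https://myapp.example.com/callback')
#   token = client.refresh_access_token(token)   # one-time refresh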
| {
"content_hash": "cfccb7b7f9bb440398a509213b889396",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 112,
"avg_line_length": 41.935483870967744,
"alnum_prop": 0.64,
"repo_name": "dchaplinsky/badparking.in.ua",
"id": "444cd8d544d8f5364cc23ccb3778185d8ce8129c",
"size": "3900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badparking/profiles/bankid/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81819"
}
],
"symlink_target": ""
} |
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import weakref
import six
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
def _devices_match(d1, d2):
return device_util.canonicalize(d1) == device_util.canonicalize(d2)
class DeviceMap(object):
"""A mapping of replicas & logical device ids to devices."""
@property
def all_devices(self):
"""Returns a tuple of strings with all devices in this DeviceMap."""
raise NotImplementedError("Required for DeviceMap implementations.")
@property
def devices_by_replica(self):
"""Returns a tuple `t` where `t[replica]` is the devices for `replica`."""
raise NotImplementedError("Required for DeviceMap implementations.")
@property
def num_logical_devices(self):
"""Count of the number of devices each replica may be defined across."""
raise NotImplementedError("Required for DeviceMap implementations.")
@property
def num_replicas_in_graph(self):
"""Number of replicas defined in this graph."""
raise NotImplementedError("Required for DeviceMap implementations.")
def logical_device_from_values(self, values):
"""Returns the logical device index `values` is on."""
raise NotImplementedError("Required for DeviceMap implementations.")
def logical_to_actual_devices(self, logical_device_id):
"""Returns sequence of `num_replicas_in_graph` devices."""
raise NotImplementedError("Required for DeviceMap implementations.")
def select_for_current_replica(self, values, replica_context):
"""Select the element of `values` for the current replica."""
raise NotImplementedError("Required for DeviceMap implementations.")
def replica_for_device(self, device):
"""Return the replica id containing `device`."""
raise NotImplementedError("Required for DeviceMap implementations.")
def select_for_device(self, values, device):
"""Select the element of `values` to access from `device`."""
raise NotImplementedError("Required for DeviceMap implementations.")
def is_device_in_replica(self, device, replica_id):
"""Returns whether `device` is a member of replica `replica_id`."""
raise NotImplementedError("Required for DeviceMap implementations.")
class SingleDeviceMap(DeviceMap):
"""A device map for 1 non-computation device.
Use `SingleDeviceMap` when the device does not correspond to some replica of
the computation. For computation devices, use `ReplicaDeviceMap` below (even
if there is only a single device in the map).
"""
def __init__(self, device):
"""Initialize a `SingleDeviceMap`.
Args:
device: A string device.
"""
assert isinstance(device, six.string_types)
self._device = device_util.canonicalize(device)
self._devices = (self._device,)
@property
def all_devices(self):
return self._devices
@property
def devices_by_replica(self):
raise ValueError("SingleDeviceMap not indexed by replicas")
@property
def num_logical_devices(self):
return 1
@property
def num_replicas_in_graph(self):
return 1
def logical_device_from_values(self, values):
del values
return 0
def logical_to_actual_devices(self, logical_device_id):
assert logical_device_id == 0
return self._devices
def select_for_current_replica(self, values, replica_context):
assert len(values) == 1
del replica_context
return values[0]
def replica_for_device(self, device):
raise ValueError("SingleDeviceMap not indexed by replicas")
def select_for_device(self, values, device):
assert len(values) == 1
if self._device != device:
raise ValueError("Device %s not found in %s (current device %s)" %
(device, self._devices, device_util.current()))
return values[0]
def is_device_in_replica(self, device, replica_id):
raise ValueError("SingleDeviceMap not indexed by replicas")
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._device)
class ReplicaDeviceMap(DeviceMap):
"""A device map for 1 device per replica."""
def __init__(self, devices):
"""Initialize a `ReplicaDeviceMap`.
Args:
devices: `devices[i]` is the string device for replica `i`.
"""
self._devices = tuple(device_util.canonicalize(d) for d in devices)
if len(set(self._devices)) != len(self._devices):
raise ValueError("Duplicate devices in %s, after canonicalization: %s" %
(devices, self._devices))
self._device_to_replica = {d: r for r, d in enumerate(self._devices)}
@property
def all_devices(self):
return self._devices
@property
def devices_by_replica(self):
return ((d,) for d in self._devices)
@property
def num_logical_devices(self):
return 1
@property
def num_replicas_in_graph(self):
return len(self._devices)
def logical_device_from_values(self, values):
del values
return 0
def logical_to_actual_devices(self, logical_device_id):
assert logical_device_id == 0
return self._devices
def select_for_current_replica(self, values, replica_context):
assert len(values) == len(self._devices)
replica_id = replica_context.replica_id_in_sync_group
if not isinstance(replica_id, int):
replica_id = tensor_util.constant_value(replica_id)
if replica_id is None:
replica_id = 0
return values[replica_id]
def replica_for_device(self, device):
return self._device_to_replica.get(device)
def select_for_device(self, values, device):
assert len(values) == len(self._devices)
replica_id = self._device_to_replica.get(device)
if replica_id is None:
raise ValueError("Device %s not found in %s (current device %s)" %
(device, self._devices, device_util.current()))
return values[replica_id]
def is_device_in_replica(self, device, replica_id):
return _devices_match(device, self._devices[replica_id])
def __str__(self):
return "[%s]" % (", ".join(self._devices))
def __repr__(self):
return "%s([%s])" % (self.__class__.__name__,
", ".join(repr(d) for d in self._devices))
LogicalDeviceSpec = collections.namedtuple(
"LogicalDeviceSpec", ("device_map", "logical_device"))
class WorkerDeviceMap(DeviceMap):
"""A device map for one value per worker."""
def __init__(self, devices, num_replicas_per_worker):
"""Initialize a `WorkerDeviceMap`.
Args:
      devices: `devices[i]` is the string device for worker `i` in the
        in-graph replication case; `devices` is a single-element list for
        its corresponding worker in the between-graph case.
num_replicas_per_worker: number of replicas per worker, useful in in-graph
replication case.
"""
self._devices = tuple(device_util.canonicalize(d) for d in devices)
if len(set(self._devices)) != len(self._devices):
raise ValueError("Duplicate devices in %s, after canonicalization: %s" %
(devices, self._devices))
self._num_replicas_per_worker = num_replicas_per_worker
@property
def all_devices(self):
return self._devices
@property
def devices_by_replica(self):
raise ValueError("`WorkerDeviceMap` is not indexed by replicas")
@property
def num_logical_devices(self):
return 1
@property
def num_replicas_in_graph(self):
return len(self._devices)
def logical_device_from_values(self, values):
del values
return 0
def logical_to_actual_devices(self, logical_device_id):
assert logical_device_id == 0
return self._devices
def select_for_current_replica(self, values, replica_context):
return values[replica_context.replica_id_in_sync_group //
self._num_replicas_per_worker]
def replica_for_device(self, device):
raise ValueError("`WorkerDeviceMap` not indexed by replicas")
def select_for_device(self, values, device):
# TODO(yuefengz): this should map from any device to the value on its
# corresponding worker.
return values[self._devices.index(device_util.canonicalize(device))]
def is_device_in_replica(self, device, replica_id):
raise ValueError("WorkerDeviceMap not indexed by replicas")
def __repr__(self):
return "%s(%r, num_replicas_per_worker=%d)" % (
self.__class__.__name__, self._devices, self._num_replicas_per_worker)
class DistributedValues(object):
"""Holds a map from device to values. Either PerReplica or Mirrored."""
def __init__(self, device_map, values, logical_device=None):
assert isinstance(device_map, DeviceMap)
self._device_map = device_map
self._values = tuple(values)
if logical_device is None:
logical_device = device_map.logical_device_from_values(self._values)
self._logical_device = logical_device
# TODO(josh11b): Split this into two functions, one with device, one without.
def get(self, device=None):
"""Returns the value for the current device or raises a ValueError."""
if device is None:
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
return self._device_map.select_for_current_replica(
self._values, replica_context)
else:
device = distribute_lib.get_update_device()
if device is None:
return self._get_cross_replica()
device = device_util.canonicalize(device)
return self._device_map.select_for_device(self._values, device)
@property
def primary(self):
"""Returns a representative component."""
return self._values[0]
@property
def devices(self):
return self._device_map.logical_to_actual_devices(self._logical_device)
@property
def logical_device(self):
return self._logical_device
@property
def device_map(self):
return self._device_map
# TODO(josh11b): Replace experimental_local_results with this?
@property
def values(self):
return self._values
@property
def is_tensor_like(self):
return all(tensor_util.is_tensor(v) for v in self._values)
def __str__(self):
devices = self.devices
assert len(self._values) == len(devices)
debug_str = ",\n".join(" %d %s: %s" % (i, devices[i], self._values[i])
for i in range(len(devices)))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
devices = self.devices
assert len(self._values) == len(devices)
debug_repr = ",\n".join(" %d %s: %r" % (i, devices[i], self._values[i])
for i in range(len(devices)))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
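# A short illustration of that rule (hypothetical, comment only):
#   class Wrapped(object):
#     def __init__(self, value): self._value = value
#     def __getattr__(self, name): return getattr(self._value, name)
#   Wrapped(1) + 1   # TypeError: Python resolves the operator via
#                    # type(Wrapped(1)).__add__ and never falls back to
#                    # __getattr__, so each operator below must be defined
#                    # explicitly on the class.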
class DistributedDelegate(DistributedValues):
"""A map from device to values; acts as the same type as the values."""
def __getattr__(self, name):
# TODO(priyag): This needs to be made robust against pitfalls from mix use
# __getattr__ and @property. See b/120402273.
return getattr(self.get(), name)
# pylint: disable=multiple-statements
def __add__(self, o): return self.get() + o
def __radd__(self, o): return o + self.get()
def __sub__(self, o): return self.get() - o
def __rsub__(self, o): return o - self.get()
def __mul__(self, o): return self.get() * o
def __rmul__(self, o): return o * self.get()
def __truediv__(self, o): return self.get() / o
def __rtruediv__(self, o): return o / self.get()
def __floordiv__(self, o):
return self.get() // o
def __rfloordiv__(self, o): return o // self.get()
def __mod__(self, o): return self.get() % o
def __rmod__(self, o): return o % self.get()
def __lt__(self, o): return self.get() < o
def __le__(self, o): return self.get() <= o
def __gt__(self, o): return self.get() > o
def __ge__(self, o): return self.get() >= o
def __and__(self, o): return self.get() & o
def __rand__(self, o): return o & self.get()
def __or__(self, o): return self.get() | o
def __ror__(self, o): return o | self.get()
def __xor__(self, o): return self.get() ^ o
def __rxor__(self, o): return o ^ self.get()
def __getitem__(self, o): return self.get()[o]
def __pow__(self, o, modulo=None): return pow(self.get(), o, modulo)
def __rpow__(self, o): return pow(o, self.get())
def __invert__(self): return ~self.get()
def __neg__(self): return -self.get()
def __abs__(self): return abs(self.get())
def __div__(self, o):
try:
return self.get().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self.get().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self.get().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self.get().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues, composite_tensor.CompositeTensor):
"""Holds a map from device to unsynchronized values."""
def _to_components(self):
replica_context = distribution_strategy_context.get_replica_context()
if replica_context is not None and replica_context.num_replicas_in_sync > 1:
raise ValueError(
"Flattening a PerReplica to components is not supported in replica "
"context.")
return self._values
def _component_metadata(self):
return self._device_map, self._logical_device
@classmethod
def _from_components(cls, components, metadata):
device_map, logical_device = metadata
return PerReplica(device_map, components, logical_device=logical_device)
def _is_graph_tensor(self):
return any(hasattr(t, "graph") for t in self._values)
def _shape_invariant_to_components(self, shape=None):
if shape is None:
return tuple(v.shape for v in self._values)
else:
return tuple(shape for _ in self._values)
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
class Mirrored(DistributedDelegate):
"""Holds a map from device to values which are kept in sync."""
def _get_cross_replica(self):
device = device_util.canonicalize(device_util.current())
replica_id = self._device_map.replica_for_device(device)
if replica_id is None:
return self.primary
return self._values[replica_id]
def _as_graph_element(self):
obj = self.get()
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return obj
def _assign_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign(array_ops.identity(tensor))
def _assert_strategy(strategy):
if not distribution_strategy_context.has_strategy():
raise RuntimeError(
'Need to be inside "with strategy.scope()" for %s' %
(strategy,))
current_strategy = distribution_strategy_context.get_strategy()
if current_strategy is not strategy:
raise RuntimeError(
"Mixing different tf.distribute.Strategy objects: %s is not %s" %
(current_strategy, strategy))
@contextlib.contextmanager
def _enter_or_assert_strategy(strategy):
if not distribution_strategy_context.has_strategy():
with strategy.scope():
yield
else:
_assert_strategy(strategy)
yield
DistributedVarOp = collections.namedtuple(
"DistributedVarOp", ["name", "graph", "type"])
class DistributedVariable(DistributedDelegate, variables_lib.AbstractVariable):
"""Holds a map from device to variables."""
  # TODO(josh11b): Support changing the set of variables, e.g. if new
# devices are joining or a device is to leave.
def __init__(self, strategy, device_map, values, logical_device=None):
self._distribute_strategy = strategy
super(DistributedVariable, self).__init__(
device_map, values, logical_device=logical_device)
self._common_name = self.primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
      The op that evaluates to True or False depending on whether all the
      component variables are initialized.
"""
result = self.primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(result, self._values[-1].is_initialized(),
name=name)
return result
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
# return grouped ops of all the var initializations of component values of
# the mirrored variable
init_op = control_flow_ops.group(tuple(
v.initializer for v in self._values))
return init_op
def _get_closest(self):
"""Return member in the same replica if possible, else the primary."""
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
return self._device_map.select_for_current_replica(
self._values, replica_context)
device = distribute_lib.get_update_device()
if device is None:
device = device_util.canonicalize(device_util.current())
replica_id = self._device_map.replica_for_device(device)
if replica_id is None:
return self.primary
return self._values[replica_id]
def initialized_value(self):
return self._get_closest().initialized_value()
@property
def initial_value(self):
return self._get_closest().initial_value
@property
def graph(self):
return self.primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self.primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self.primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self.primary.name
@property
def dtype(self):
return self.primary.dtype
@property
def shape(self):
return self.primary.shape
@property
def handle(self):
device = None
replica_context = distribution_strategy_context.get_replica_context()
if replica_context is None:
device = distribute_lib.get_update_device()
if device is None:
raise ValueError("`handle` is not available outside the replica context"
" or a `tf.distribute.Strategy.update()` call.")
return self.get(device=device).handle
def eval(self, session=None):
return self._get_closest().eval(session)
@property
def _save_slice_info(self):
return self.primary._save_slice_info # pylint: disable=protected-access
def _get_save_slice_info(self):
return self.primary._get_save_slice_info() # pylint: disable=protected-access
def _set_save_slice_info(self, save_slice_info):
for v in self._values:
v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access
@property
def device(self):
return self._get_closest().device
@property
def trainable(self):
return self.primary.trainable
@property
def distribute_strategy(self):
return self._distribute_strategy
def get_shape(self):
return self.primary.get_shape()
def to_proto(self, export_scope=None):
return self.primary.to_proto(export_scope=export_scope)
@property
def op(self):
# We want cross-replica code that does some var.op.X calls
# to work (even if the current device isn't in self.devices), but
# other uses of var.op in a cross-replica context to fail.
if distribution_strategy_context.in_cross_replica_context():
return DistributedVarOp(self.primary.op.name,
self.primary.op.graph,
self.primary.op.type)
return self.get().op
@property
def _in_graph_mode(self):
return self.primary._in_graph_mode # pylint: disable=protected-access
def read_value(self):
return self._distribute_strategy.extended.read_var(self)
def value(self):
return self._get_closest().value()
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
ops.register_dense_tensor_like_type(DistributedVariable)
def _validate_colocate_extended(v, extended):
variable_strategy = v._distribute_strategy # pylint: disable=protected-access
if variable_strategy.extended is not extended:
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not %s created in scope: %s" %
(v, variable_strategy))
def validate_colocate_distributed_variable(v, extended):
if not isinstance(v, DistributedVariable):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def validate_colocate_tpu_variable(v, extended):
if not isinstance(v, TPUMirroredVariable):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def validate_colocate(v, extended):
if not hasattr(v, "_distribute_strategy"):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def _apply_aggregation(strategy, value, aggregation, destinations):
if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return strategy.extended.broadcast_to(
strategy.experimental_local_results(value)[0],
destinations=destinations)
reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
return strategy.extended.reduce_to(reduce_op, value, destinations)
_aggregation_error_msg = (
    "You must specify an aggregation method to update a "
    "{variable_type} in replica context. You can do so by passing "
    "an explicit value for argument `aggregation` to tf.Variable(..), "
    "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`. "
    "`tf.VariableAggregation` lists the possible aggregation methods. "
    "This is required because {variable_type} should always be "
    "kept in sync. When updating them or assigning to them in a "
    "replica context, we automatically try to aggregate the values "
    "before updating the variable. For this aggregation, we need to "
    "know the aggregation method. "
    "Another alternative is to not try to update such "
    "{variable_type} in replica context, but in cross replica "
    "context. You can enter cross replica context by calling "
    "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`. "
    "Inside `merge_fn`, you can then update the {variable_type} "
    "using `tf.distribute.StrategyExtended.update()`.")
class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
"""Class for defining how to restore a MirroredVariable."""
def __init__(self, mirrored_variable, primary_variable, name):
self._mirrored_variable = mirrored_variable
super(_MirroredSaveable, self).__init__(primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return control_flow_ops.group(tuple(
_assign_on_device(v.device, v, tensor)
for v in self._mirrored_variable.values))
class MirroredVariable(DistributedVariable, Mirrored):
"""Holds a map from device to variables whose values are kept in sync."""
def __init__(
self, strategy, device_map, values, aggregation, logical_device=None):
super(MirroredVariable, self).__init__(
strategy, device_map, values, logical_device=logical_device)
self._aggregation = aggregation
# The arguments to update() are automatically unwrapped so the update()
# function would normally see regular variables, not MirroredVariables.
# However, the update function can still operate on wrapped MirroredVariables
# through object members, captured arguments, etc. This is more likely in an
# update_non_slot() function (like OptimizerV2._finish), which can
# update several non-slot variables in one call.
def _assign_func(self, *args, **kwargs):
with _enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if distribution_strategy_context.in_cross_replica_context():
update_device = distribute_lib.get_update_device()
if update_device is not None:
# We are calling an assign function on the mirrored variable in an
# update context.
v = self.get(device=update_device)
return f(v, *args, **kwargs)
# We are calling assign on the mirrored variable in cross replica
# context, use `strategy.extended.update()` to update the variable.
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
_assert_replica_context(self._distribute_strategy)
# We are calling an assign function on the mirrored variable in replica
# context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function on each of the mirrored variables with the
# reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(_aggregation_error_msg.format(
variable_type="MirroredVariable"))
def merge_fn(strategy, value, *other_args, **other_kwargs):
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, f, args=(v,) + other_args, kwargs=other_kwargs)
return distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=args, kwargs=kwargs)
def assign_sub(self, *args, **kwargs):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def aggregation(self):
return self._aggregation
def _get_cross_replica(self):
device = device_util.canonicalize(device_util.current())
replica_id = self._device_map.replica_for_device(device)
if replica_id is None:
return array_ops.identity(self.primary)
return array_ops.identity(self._values[replica_id])
def _as_graph_element(self):
# pylint: disable=protected-access
if distribution_strategy_context.in_cross_replica_context():
return self.primary._as_graph_element()
return self.get()._as_graph_element()
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self.primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() call.
assert not as_ref
return ops.internal_convert_to_tensor(
var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(MirroredVariable,
_tensor_conversion_mirrored)
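# Illustrative note: with this registration a MirroredVariable can be passed
# directly to ops, e.g. math_ops.matmul(x, v); the conversion reads the
# component for the current device via `v.get()`.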
def _enclosing_tpu_context():
# pylint: disable=protected-access
tpu_context = ops.get_default_graph()._get_control_flow_context()
# pylint: enable=protected-access
while tpu_context is not None and not isinstance(
tpu_context, control_flow_ops.XLAControlFlowContext):
tpu_context = tpu_context.outer_context
return tpu_context
# TODO(jhseu): Deduplicate code. We copy code because we don't want to
# inherit from DistributedDelegate. DistributedDelegate will not work in a
# tpu.replicate() because it assumes that you're in a device context where you
# can operate on a single version of the variable, but a tpu.replicate()
# operates on all variables and is replicated during a rewrite pass.
class TPUMirroredVariable(variables_lib.Variable):
"""Holds a map from device to TPU variables whose values are kept in sync."""
def __init__(
self, strategy, device_map, values, aggregation, logical_device=None):
assert isinstance(device_map, DeviceMap)
self._distribute_strategy = strategy
self._device_map = device_map
self._values = tuple(values)
if logical_device is None:
logical_device = device_map.logical_device_from_values(self._values)
self._logical_device = logical_device
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in self._values:
v._mirrored_container = weakref.ref(self) # pylint: disable=protected-access
self._common_name = self.primary.name.split(":")[0]
# Handle id is needed for get_replicated_var_handle to cache the variables
# correctly since in eager mode different variables can have the same name.
if context.executing_eagerly():
self._handle_id = self._common_name + "_" + str(id(self.primary))
else:
self._handle_id = self._common_name
self._aggregation = aggregation
# Needed for GradientTape
self._trainable = self.primary.trainable
# Typically like `DistributedVariable`, a `TPUMirroredVariable`'s
    # initializer is composed of the initializers of the component variables.
# However, in some cases, such as when restoring from a checkpoint, we may
# set the _initializer_op property on the entire `TPUMirroredVariable`.
self._initializer_op = None
def _get(self, device=None):
"""Returns the value for the current device or raises a ValueError."""
if device is None:
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
return self._device_map.select_for_current_replica(
self._values, replica_context)
else:
device = distribute_lib.get_update_device()
if device is None:
return self._get_cross_replica()
device = device_util.canonicalize(device)
return self._device_map.select_for_device(self._values, device)
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
def initialized_value(self):
return self.primary.initialized_value()
@property
def initial_value(self):
return self.primary.initial_value
@property
def primary(self):
"""Returns a representative component."""
return self._values[0]
@property
def devices(self):
return self._device_map.logical_to_actual_devices(self._logical_device)
@property
def logical_device(self):
return self._logical_device
@property
def device_map(self):
return self._device_map
# TODO(josh11b): Replace experimental_local_results with this?
@property
def values(self):
return self._values
@property
def distribute_strategy(self):
return self._distribute_strategy
# pylint: disable=multiple-statements
def __add__(self, o): return self.read_value() + o
def __radd__(self, o): return o + self.read_value()
def __sub__(self, o): return self.read_value() - o
def __rsub__(self, o): return o - self.read_value()
def __mul__(self, o): return self.read_value() * o
def __rmul__(self, o): return o * self.read_value()
def __truediv__(self, o): return self.read_value() / o
def __rtruediv__(self, o): return o / self.read_value()
def __floordiv__(self, o): return self.read_value() // o
def __rfloordiv__(self, o): return o // self.read_value()
def __mod__(self, o): return self.read_value() % o
def __rmod__(self, o): return o % self.read_value()
def __lt__(self, o): return self.read_value() < o
def __le__(self, o): return self.read_value() <= o
def __gt__(self, o): return self.read_value() > o
def __ge__(self, o): return self.read_value() >= o
def __and__(self, o): return self.read_value() & o
def __rand__(self, o): return o & self.read_value()
def __or__(self, o): return self.read_value() | o
def __ror__(self, o): return o | self.read_value()
def __xor__(self, o): return self.read_value() ^ o
def __rxor__(self, o): return o ^ self.read_value()
def __getitem__(self, o): return self.read_value()[o]
def __pow__(self, o, modulo=None): return pow(self.read_value(), o, modulo)
def __rpow__(self, o): return pow(o, self.read_value())
def __invert__(self): return ~self.read_value()
def __neg__(self): return -self.read_value()
def __abs__(self): return abs(self.read_value())
def __div__(self, o):
try:
return self.read_value().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self.read_value().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self.read_value().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self.read_value().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __str__(self):
devices = self.devices
debug_str = ",\n".join(" %d %s: %s" % (i, devices[i], self._values[i])
for i in range(len(devices)))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
devices = self.devices
debug_repr = ",\n".join(" %d %s: %r" % (i, devices[i], self._values[i])
for i in range(len(devices)))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
@property
def handle(self):
# If we're in a tpu.rewrite(), return the replicated handle.
tpu_context = _enclosing_tpu_context()
if tpu_context is not None:
return tpu_context.get_replicated_var_handle(
self._handle_id, self._values)
device = distribute_lib.get_update_device()
if device is None:
return self.primary.handle
return self._get(device=device).handle
@property
def device(self):
return self.handle.device
def eval(self, session=None):
return self.primary.eval(session)
# The arguments to update() are automatically unwrapped so the update()
# function would normally see regular variables, not MirroredVariables.
# However, the update function can still operate on wrapped MirroredVariables
# through object members, captured arguments, etc. This is more likely in an
# update_non_slot() function (like OptimizerV2._finish), which can
# update several non-slot variables in one call.
def _assign_func(self, *args, **kwargs):
with _enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if distribution_strategy_context.in_cross_replica_context():
if _enclosing_tpu_context() is not None:
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
update_device = distribute_lib.get_update_device()
# We are calling update on the mirrored variable in cross replica
# context.
if update_device is not None:
# We are calling an assign function on the mirrored variable in cross
# replica context.
v = self._get(device=update_device)
return f(v, *args, **kwargs)
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
_assert_replica_context(self._distribute_strategy)
# We are calling an assign function on the mirrored variable in replica
# context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function on each of the mirrored variables with the
# reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(_aggregation_error_msg.format(
variable_type="TPUMirroredVariable"))
def merge_fn(strategy, value, *other_args, **other_kwargs):
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, f, args=(v,) + other_args, kwargs=other_kwargs)
return distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=args, kwargs=kwargs)
@contextlib.contextmanager
def _handle_graph(self, handle):
# Note: might have an eager tensor but not be executing eagerly when
# building functions.
if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor)
or ops.has_default_graph()):
yield
else:
with handle.graph.as_default():
yield
@property
def trainable(self):
return self._trainable
def _read_variable_op(self, parent_op=None):
if self.trainable:
tape.variable_accessed(self)
if parent_op is not None:
with ops.control_dependencies([parent_op]):
return gen_resource_variable_ops.read_variable_op(
self.handle, self.dtype)
return gen_resource_variable_ops.read_variable_op(
self.handle, self.dtype)
def read_value(self):
return self._read_variable_op()
def assign_sub(self, *args, **kwargs):
def assign_sub_fn(var, delta, *ar, **kw):
del ar
name = kw.pop("name", None)
read_value = kw.pop("read_value", True)
with self._handle_graph(var.handle):
op = gen_resource_variable_ops.assign_sub_variable_op(
var.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op(parent_op=op)
return op
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
def assign_add_fn(var, delta, *ar, **kw):
del ar
name = kw.pop("name", None)
read_value = kw.pop("read_value", True)
with self._handle_graph(var.handle):
op = gen_resource_variable_ops.assign_add_variable_op(
var.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op(parent_op=op)
return op
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
def assign_fn(var, value, *ar, **kw):
del ar
name = kw.pop("name", None)
read_value = kw.pop("read_value", True)
with self._handle_graph(var.handle):
op = gen_resource_variable_ops.assign_variable_op(
var.handle, ops.convert_to_tensor(value, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op(parent_op=op)
return op
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def aggregation(self):
return self._aggregation
@property
def constraint(self):
return self.primary.constraint
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
init_op = control_flow_ops.group(tuple(
v.initializer for v in self._values))
return init_op
@property
def graph(self):
return self.primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self.primary._unique_id # pylint: disable=protected-access
@property
def name(self):
return self.primary.name
@property
def dtype(self):
return self.primary.dtype
@property
def shape(self):
return self.primary.shape
def get_shape(self):
return self.primary.get_shape()
def to_proto(self, export_scope=None):
return self.primary.to_proto(export_scope=export_scope)
def _get_cross_replica(self):
device = device_util.canonicalize(device_util.current())
replica = self._device_map.replica_for_device(device)
if replica is None:
return self.primary
return self._values[replica]
def _as_graph_element(self):
# pylint: disable=protected-access
if _enclosing_tpu_context() is None:
if distribution_strategy_context.in_cross_replica_context():
return self.primary._as_graph_element()
return self._get()._as_graph_element()
return None
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self.primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
# Needed to pass ResourceVariable checks.
@property
def op(self):
return self.primary.op
# pylint: disable=protected-access
@property
def _save_slice_info(self):
return self.primary._save_slice_info
def _get_save_slice_info(self):
return self.primary._get_save_slice_info()
def _set_save_slice_info(self, save_slice_info):
return self.primary._set_save_slice_info(save_slice_info)
# pylint: enable=protected-access
@property
def _in_graph_mode(self):
return self.primary._in_graph_mode # pylint: disable=protected-access
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# pylint: disable=protected-access
if _enclosing_tpu_context() is None:
return self._get()._dense_var_to_tensor(dtype, name, as_ref)
# pylint: enable=protected-access
if dtype is not None and dtype != self.dtype:
return math_ops.cast(self.read_value(), dtype)
if as_ref:
return self.handle
else:
return self.read_value()
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
      The op that evaluates to True or False depending on whether all the
      component variables are initialized.
"""
# TODO(jhseu): Do we need TPU context implementation?
result = self.primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(result, self._values[-1].is_initialized(),
name=name)
return result
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_tpu_mirrored(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(TPUMirroredVariable,
_tensor_conversion_tpu_mirrored)
ops.register_dense_tensor_like_type(TPUMirroredVariable)
class _SyncOnReadSaveable(saver.BaseSaverBuilder.SaveableObject):
"""Class for defining how to restore a SyncOnReadVariable."""
def __init__(self, sync_on_read_variable, name):
self._sync_on_read_variable = sync_on_read_variable
# We use a callable so that we don't have to evaluate this expression
# in the case where we are trying to restore instead of save.
def tensor():
strategy = sync_on_read_variable._distribute_strategy # pylint: disable=protected-access
return strategy.extended.read_var(sync_on_read_variable)
spec = saver.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name,
dtype=sync_on_read_variable.dtype,
device=sync_on_read_variable.primary.device)
super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return self._sync_on_read_variable.assign(tensor)
def _assert_replica_context(strategy):
replica_context = distribution_strategy_context.get_replica_context()
if not replica_context:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
if replica_context.strategy is not strategy:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
class SyncOnReadVariable(DistributedVariable, PerReplica):
"""Holds a map from device to variables whose values are reduced on save."""
def __init__(
self, strategy, device_map, values, aggregation, logical_device=None):
self._aggregation = aggregation
super(SyncOnReadVariable, self).__init__(
strategy, device_map, values, logical_device=logical_device)
def assign_sub(self, *args, **kwargs):
_assert_replica_context(self._distribute_strategy)
return self.get().assign_sub(*args, **kwargs)
def assign_add(self, *args, **kwargs):
_assert_replica_context(self._distribute_strategy)
return self.get().assign_add(*args, **kwargs)
def assign(self, *args, **kwargs):
if distribution_strategy_context.in_cross_replica_context():
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
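      # For example, a SUM-aggregated variable saved as 8 across 4 replicas is
      # restored as 8 / 4 = 2 on each replica, so a later cross-replica read
      # (which sums the components) yields 8 again.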
tensor = args[0]
if self._aggregation == vs.VariableAggregation.SUM:
tensor *= 1. / len(self.devices)
return control_flow_ops.group(tuple(
_assign_on_device(v.device, v, tensor) for v in self._values))
else:
_assert_replica_context(self._distribute_strategy)
return self.get().assign(*args, **kwargs)
@property
def aggregation(self):
return self._aggregation
def _get_cross_replica(self):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return self.primary
return self._distribute_strategy.reduce(
reduce_util.ReduceOp.from_variable_aggregation(self.aggregation), self,
axis=None)
def _as_graph_element(self):
# pylint: disable=protected-access
if distribution_strategy_context.in_cross_replica_context():
return self._get_cross_replica()
return self.get()._as_graph_element()
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
`SyncOnReadVariable`s.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _SyncOnReadSaveable(self, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
return ops.internal_convert_to_tensor(
var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(SyncOnReadVariable,
_tensor_conversion_sync_on_read)
def regroup(device_map, values, wrap_class=PerReplica):
"""Makes a nest per-replica into a nest of PerReplica/Mirrored values."""
assert isinstance(device_map, DeviceMap)
assert len(values) == device_map.num_replicas_in_graph
v0 = values[0]
if isinstance(v0, list):
for v in values[1:]:
assert isinstance(v, list)
assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
(len(v), len(v0), v, v0))
return [regroup(device_map, tuple(v[i] for v in values), wrap_class)
for i in range(len(v0))]
if isinstance(v0, tuple):
for v in values[1:]:
assert isinstance(v, tuple)
assert len(v) == len(v0)
regrouped_tuple = tuple(
regroup(device_map, tuple(v[i] for v in values), wrap_class)
for i in range(len(v0)))
if hasattr(v0, "_fields"):
# This tuple is in fact a namedtuple! Create a new namedtuple instance
# and initialize it with the regrouped values:
assert hasattr(type(v0), "_make")
return type(v0)._make(regrouped_tuple)
else:
return regrouped_tuple
if isinstance(v0, dict):
v0keys = set(v0.keys())
for v in values[1:]:
assert isinstance(v, dict), ("v[0]: %r v[i]: %r" % (v0, v))
assert set(v.keys()) == v0keys, ("v[0].keys: %s v[i].keys: %s" %
(v0keys, set(v.keys())))
return {key: regroup(device_map, tuple(v[key] for v in values), wrap_class)
for key in v0keys}
# If exactly the same object across all devices, return it unwrapped.
same_id = True
for v in values[1:]:
if v is not v0:
same_id = False
break
# Consider three cases where same_id is true:
# * If v0 is a DistributedVariable (a MirroredVariable or
# SyncOnReadVariable, and same_id means it is the same across all
# devices), we want to return it. We check DistributedVariable
# specifically since it can look like it has a
# _distributed_container member since its members do.
# * If v0 is a member of a distributed variable, in which case
# hasattr(v0, "_distributed_container") is true, we want to
# return the DistributedVariable that contains it using the
# _distributed_container logic below. This case can trigger
# same_id when there is only one device.
# * In any other situation, same_id means we return v0.
if same_id and (isinstance(v0, DistributedVariable) or
not hasattr(v0, "_distributed_container")):
return v0
# Detect the case where each device has a parallel component of the
# same MirroredVariable (or SyncOnReadVariable). In this case we
# want to return the containing MirroredVariable, after a bunch of
# sanity checking. In particular, each component should have the
# same container, and the devices of the variables should match the
# keys of the per-replica dictionary.
if hasattr(v0, "_distributed_container"):
# pylint: disable=protected-access
assert not isinstance(v0, MirroredVariable), (
"ids = %s, values = %s" % ([id(v) for v in values], values))
assert device_map.is_device_in_replica(v0.device, 0), (
"v0.device = %s, device_map = %s" % (v0.device, device_map))
distributed_container = v0._distributed_container()
assert distributed_container is not None
for r, v in enumerate(values[1:]):
assert device_map.is_device_in_replica(v.device, r + 1), (
"v.device = %s, r = %d, device_map = %s" %
(v.device, r + 1, device_map))
assert distributed_container is v._distributed_container()
return distributed_container
# pylint: enable=protected-access
return wrap_class(device_map, values)
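# Illustrative sketch of regrouping per-replica structures; `device_map` is
# assumed to cover two replicas and `t0`, `t1` are the tensors returned by
# each replica's step function:
#
#   values = ({"loss": t0}, {"loss": t1})   # one structure per replica
#   result = regroup(device_map, values)
#   result["loss"]                           # -> PerReplica((t0, t1))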
def select_replica(replica_id, structured):
"""Specialize a nest of regular & per-replica values for one replica."""
def _get(x):
return x.values[replica_id] if isinstance(x, DistributedValues) else x
return nest.map_structure(_get, structured)
def select_device_mirrored(device, structured):
"""Specialize a nest of regular & mirrored values for one device."""
def _get_mirrored(x):
if isinstance(x, DistributedValues):
if not isinstance(x, Mirrored):
raise TypeError(
"Expected value to be mirrored across replicas: %s in %s." %
(x, structured))
return x.get(device)
else:
return x
return nest.map_structure(_get_mirrored, structured)
def update_regroup(extended, device_map, updates, group):
"""Regroup for an update, with dependencies to ensure all updates execute."""
# TODO(josh11b): Replace "Mirrored" here with a function that does the following
# so we can avoid all these nest operations.
regrouped = regroup(device_map, updates, Mirrored)
if not group:
return nest.map_structure(extended._local_results, regrouped) # pylint: disable=protected-access
grouped_flat = []
for u in nest.flatten(regrouped):
if isinstance(u, DistributedValues):
g = extended._group(u) # pylint: disable=protected-access
if u.is_tensor_like:
# Make sure we run all updates. Without this, something like
# session.run(extended.update(...)) may only update one replica.
values = []
for d in u.devices:
with ops.device(d), ops.control_dependencies([g]):
values.append(array_ops.identity(u.get(d)))
g = Mirrored(u.device_map, values)
else:
g = u
grouped_flat.append(g)
return nest.pack_sequence_as(regrouped, grouped_flat)
def value_container(val):
"""Returns the container that this per-replica `value` belongs to.
Args:
val: A value returned by `call_for_each_replica()` or a variable
created in `scope()`.
Returns:
A container that `value` belongs to.
If value does not belong to any container (including the case of
container having been destroyed), returns the value itself.
"""
if (hasattr(val, "_distributed_container") and
# DistributedVariable has _distributed_container defined
# but we don't want to return it.
not isinstance(val, DistributedVariable)):
container = val._distributed_container() # pylint: disable=protected-access
if container is not None:
return container
return val
class AggregatingVariable(variables_lib.Variable):
"""A wrapper around a variable that aggregates updates across replicas."""
def __init__(self, strategy, v, aggregation):
self._distribute_strategy = strategy
self._v = v
# NOTE: We don't use "_distributed_container" here because we don't want
# to trigger that code path in regroup().
v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access
self._aggregation = aggregation
def get(self):
return self._v
@property
def distribute_strategy(self):
return self._distribute_strategy
def __getattr__(self, name):
return getattr(self._v, name)
def _assign_func(self, *args, **kwargs):
with _enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if distribution_strategy_context.in_cross_replica_context():
update_device = distribute_lib.get_update_device()
if update_device is not None:
# We are calling an assign function in an update context.
return f(self._v, *args, **kwargs)
# We are calling an assign function in cross replica context, wrap it in
# an update call.
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
replica_context = distribution_strategy_context.get_replica_context()
assert replica_context
# We are calling an assign function in replica context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function with the reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(_aggregation_error_msg.format(
variable_type="AggregatingVariable"))
def merge_fn(strategy, value, *other_args, **other_kwargs):
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, f, args=(v,) + other_args, kwargs=other_kwargs)
return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)
def assign_sub(self, *args, **kwargs):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def initializer(self):
return self._v.initializer
def initialized_value(self):
return self._v.initialized_value()
@property
def initial_value(self):
return self._v.initial_value
@property
def op(self):
return self._v.op
def read_value(self):
return self._v.read_value()
def eval(self, session=None):
return self._v.eval(session)
@property
def graph(self):
return self._v.graph
@property
def device(self):
return self._v.device
@property
def shape(self):
return self._v.shape
@property
def aggregation(self):
return self._aggregation
@property
def name(self):
return self._v.name
@property
def dtype(self):
return self._v.dtype
# TODO(josh11b): Test saving & restoring.
def _gather_saveables_for_checkpoint(self):
return {trackable.VARIABLE_VALUE_KEY: self._v}
# pylint: disable=multiple-statements
def __add__(self, o): return self._v + o
def __radd__(self, o): return o + self._v
def __sub__(self, o): return self._v - o
def __rsub__(self, o): return o - self._v
def __mul__(self, o): return self._v * o
def __rmul__(self, o): return o * self._v
def __truediv__(self, o): return self._v / o
def __rtruediv__(self, o): return o / self._v
def __floordiv__(self, o): return self._v // o
def __rfloordiv__(self, o): return o // self._v
def __mod__(self, o): return self._v % o
def __rmod__(self, o): return o % self._v
def __lt__(self, o): return self._v < o
def __le__(self, o): return self._v <= o
def __gt__(self, o): return self._v > o
def __ge__(self, o): return self._v >= o
def __and__(self, o): return self._v & o
def __rand__(self, o): return o & self._v
def __or__(self, o): return self._v | o
def __ror__(self, o): return o | self._v
def __xor__(self, o): return self._v ^ o
def __rxor__(self, o): return o ^ self._v
def __getitem__(self, o): return self._v[o]
def __pow__(self, o, modulo=None): return pow(self._v, o, modulo)
def __rpow__(self, o): return pow(o, self._v)
def __invert__(self): return ~self._v
def __neg__(self): return -self._v
def __abs__(self): return abs(self._v)
def __div__(self, o):
try:
return self._v.__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._v.__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._v.__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._v.__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __str__(self):
return str(self._v)
def __repr__(self):
return repr(self._v)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
return ops.internal_convert_to_tensor(
var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(
AggregatingVariable, _tensor_conversion_aggregate)
ops.register_dense_tensor_like_type(AggregatingVariable)
| {
"content_hash": "4f1b99cb9d662144cae7abb302ceae74",
"timestamp": "",
"source": "github",
"line_count": 1789,
"max_line_length": 108,
"avg_line_length": 36.2023476802683,
"alnum_prop": 0.6741500169842202,
"repo_name": "ghchinoy/tensorflow",
"id": "0b5678d4cf935676706ee92190a703c48567eb4c",
"size": "65455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
"""Constants for the ecobee integration."""
import logging
from homeassistant.components.weather import (
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
)
from homeassistant.const import Platform
_LOGGER = logging.getLogger(__package__)
DOMAIN = "ecobee"
DATA_ECOBEE_CONFIG = "ecobee_config"
CONF_INDEX = "index"
CONF_REFRESH_TOKEN = "refresh_token"
ECOBEE_MODEL_TO_NAME = {
"idtSmart": "ecobee Smart",
"idtEms": "ecobee Smart EMS",
"siSmart": "ecobee Si Smart",
"siEms": "ecobee Si EMS",
"athenaSmart": "ecobee3 Smart",
"athenaEms": "ecobee3 EMS",
"corSmart": "Carrier/Bryant Cor",
"nikeSmart": "ecobee3 lite Smart",
"nikeEms": "ecobee3 lite EMS",
"apolloSmart": "ecobee4 Smart",
"vulcanSmart": "ecobee4 Smart",
}
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.CLIMATE,
Platform.HUMIDIFIER,
Platform.SENSOR,
Platform.WEATHER,
]
MANUFACTURER = "ecobee"
# Translates ecobee API weatherSymbol to Home Assistant usable names
# https://www.ecobee.com/home/developer/api/documentation/v1/objects/WeatherForecast.shtml
ECOBEE_WEATHER_SYMBOL_TO_HASS = {
0: ATTR_CONDITION_SUNNY,
1: ATTR_CONDITION_PARTLYCLOUDY,
2: ATTR_CONDITION_PARTLYCLOUDY,
3: ATTR_CONDITION_CLOUDY,
4: ATTR_CONDITION_CLOUDY,
5: ATTR_CONDITION_CLOUDY,
6: ATTR_CONDITION_RAINY,
7: ATTR_CONDITION_SNOWY_RAINY,
8: ATTR_CONDITION_POURING,
9: ATTR_CONDITION_HAIL,
10: ATTR_CONDITION_SNOWY,
11: ATTR_CONDITION_SNOWY,
12: ATTR_CONDITION_SNOWY_RAINY,
13: "snowy-heavy",
14: ATTR_CONDITION_HAIL,
15: ATTR_CONDITION_LIGHTNING_RAINY,
16: ATTR_CONDITION_WINDY,
17: "tornado",
18: ATTR_CONDITION_FOG,
19: "hazy",
20: "hazy",
21: "hazy",
-2: None,
}
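# Illustrative usage sketch; `forecast` is an assumed dict from the ecobee API:
#
#   condition = ECOBEE_WEATHER_SYMBOL_TO_HASS.get(forecast["weatherSymbol"])
#   # the "no symbol" value (-2) resolves to None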
| {
"content_hash": "52c0b3b9e4d77f384e371b8d7e09c683",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 90,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.6798418972332015,
"repo_name": "GenericStudent/home-assistant",
"id": "50dd606ad254f5b6b20fb995be2089aa2e2bfb8f",
"size": "2024",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ecobee/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import datetime as dt
from psycopg2.extensions import connection
from gargbot_3000 import pictures
from tests import conftest
def assert_valid_returns(url: str, timestamp: dt.datetime, description: str) -> None:
assert url.startswith("https")
assert type(timestamp) == dt.datetime
assert description == "" or description.startswith("Her er et bilde med")
assert not description.startswith("Im so stoopid")
def test_random(conn: connection, dbx: conftest.MockDropbox) -> None:
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=None)
assert_valid_returns(url, timestamp, description)
def test_topic(conn: connection, dbx: conftest.MockDropbox) -> None:
topic = "topic1"
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=[topic])
assert_valid_returns(url, timestamp, description)
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert pic.topic == topic
def test_year(conn: connection, dbx: conftest.MockDropbox) -> None:
year = "2002"
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=[year])
assert_valid_returns(url, timestamp, description)
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert pic.taken_at.year == int(year)
def test_user(conn: connection, dbx: conftest.MockDropbox) -> None:
user = "slack_nick3"
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=[user])
assert_valid_returns(url, timestamp, description)
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert 3 in pic.faces
def test_user_exclusive(conn: connection, dbx: conftest.MockDropbox) -> None:
user = "slack_nick3"
exclusive_pic = "test_pic7"
# get seed that returns nonexclusive
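    # (the for/else raises only if every candidate seed's first pick is the
    # exclusive picture, i.e. the loop never reaches `break`)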
for seed in range(1, 10):
with conn.cursor() as cursor:
cursor.execute(f"select setseed(0.{seed})")
url1, timestamp, description = pictures.get_pic(conn, dbx, arg_list=[user])
assert_valid_returns(url1, timestamp, description)
if not url1.endswith(exclusive_pic):
break
else: # no test coverage
raise Exception("could not find good seed")
with conn.cursor() as cursor:
cursor.execute(f"select setseed(0.{seed})")
url2, timestamp, description = pictures.get_pic(conn, dbx, arg_list=["kun", user])
assert_valid_returns(url2, timestamp, description)
assert url2.endswith(exclusive_pic)
def test_multiple_users(conn: connection, dbx: conftest.MockDropbox) -> None:
users = ["slack_nick11", "slack_nick3"]
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=users)
assert_valid_returns(url, timestamp, description)
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert {11, 3}.issubset(pic.faces), f"Wrong picture {pic}"
def test_multiple_users_exclusive(conn: connection, dbx: conftest.MockDropbox) -> None:
users = ["slack_nick2", "slack_nick3"]
exclusive_pic = "test_pic4"
# get seed that returns nonexclusive
for seed in range(0, 20):
with conn.cursor() as cursor:
cursor.execute(f"select setseed(0.{seed})")
url1, timestamp, description = pictures.get_pic(conn, dbx, arg_list=users)
assert_valid_returns(url1, timestamp, description)
if not url1.endswith(exclusive_pic):
break
else: # no test coverage
raise Exception("could not find good seed")
with conn.cursor() as cursor:
cursor.execute(f"select setseed(0.{seed})")
url2, timestamp, description = pictures.get_pic(conn, dbx, arg_list=["kun"] + users)
assert_valid_returns(url2, timestamp, description)
assert url2.endswith(exclusive_pic)
for _ in range(10):
url3, timestamp, description = pictures.get_pic(
conn, dbx, arg_list=["kun"] + users
)
assert_valid_returns(url3, timestamp, description)
pic = next(pic for pic in conftest.pics if url3.endswith(pic.path))
assert pic.faces == [2, 3], f"Wrong picture {pic}"
def test_multiple_args(conn: connection, dbx: conftest.MockDropbox) -> None:
arg_list = ["slack_nick2", "topic1", "2001"]
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=arg_list)
assert_valid_returns(url, timestamp, description)
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert pic.topic == "topic1"
assert pic.taken_at.year == 2001
assert 2 in pic.faces
def test_reduce_args(conn: connection, dbx: conftest.MockDropbox) -> None:
arg_list = ["kun", "slack_nick11"]
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=arg_list)
assert description == (
"Fant ikke bilde med `kun`, `slack_nick11`. "
"Her er et bilde med `slack_nick11` i stedet."
)
# Errors:
def test_error_txt(conn: connection, dbx: conftest.MockDropbox) -> None:
url, timestamp, description = pictures.get_pic(conn, dbx, arg_list=["2000"])
assert url.startswith("https")
assert type(timestamp) == dt.datetime
assert description.startswith("Im so stoopid")
assert description.endswith("Her er et tilfeldig bilde i stedet.")
def test_error_txt_with_valid(conn: connection, dbx: conftest.MockDropbox) -> None:
url, timestamp, description = pictures.get_pic(
conn, dbx, arg_list=["1999", "slack_nick5"]
)
assert url.startswith("https")
assert type(timestamp) == dt.datetime
assert description.startswith("Im so stoopid")
assert "Her er et bilde med" in description
pic = next(pic for pic in conftest.pics if url.endswith(pic.path))
assert 5 in pic.faces
def test_error_txt_with_impossible_combination(
conn: connection, dbx: conftest.MockDropbox
) -> None:
url, timestamp, description = pictures.get_pic(
conn, dbx, arg_list=["2001", "topic3"]
)
assert url.startswith("https")
assert type(timestamp) == dt.datetime
assert description.startswith("Fant ikke")
assert "Her er et bilde med" in description
| {
"content_hash": "042852db137da2ed4b924fff6eed0980",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 88,
"avg_line_length": 39.94117647058823,
"alnum_prop": 0.681230567828506,
"repo_name": "eirki/gargbot_3000",
"id": "fabaa84193df29daeb4f669d3f965be4c5196dcf",
"size": "6151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2800"
},
{
"name": "Dockerfile",
"bytes": "1039"
},
{
"name": "HTML",
"bytes": "6032"
},
{
"name": "JavaScript",
"bytes": "36277"
},
{
"name": "Python",
"bytes": "254223"
},
{
"name": "Shell",
"bytes": "994"
}
],
"symlink_target": ""
} |
import warnings
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function
from chainer.functions.normalization import batch_normalization
from chainer.utils import type_check
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchRenormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,
rmax=1, dmax=0, update_statistics=True):
self._running_mean = mean
self._running_var = var
self.rmax = rmax
self.dmax = dmax
self.r = None
self.update_statistics = update_statistics
self.eps = eps
self.decay = decay
def _warn_accessing_property(self):
warnings.warn(
'The attributes of BatchRenormalizationFunction '
'are deprecated. '
'Consider setting update_statistics=True to '
'batch_renormalization to update running statistics.',
DeprecationWarning)
@property
def running_mean(self):
self._warn_accessing_property()
return self._running_mean
@property
def running_var(self):
self._warn_accessing_property()
return self._running_var
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(tkerola): Check shape
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
x, gamma, beta = inputs
# Note: we must be in train mode.
assert configuration.config.train
if not self.update_statistics:
self._running_mean = xp.array(self._running_mean)
self._running_var = xp.array(self._running_var)
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis)
var = x.var(axis=axis) + self.eps
self.std = xp.sqrt(var, dtype=var.dtype)
running_sigma = xp.sqrt(self._running_var + self.eps,
dtype=self._running_mean.dtype)
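        # Batch renormalization correction factors (Ioffe, 2017,
        # https://arxiv.org/abs/1702.03275):
        #   r = clip(std_batch / std_running, 1/rmax, rmax)
        #   d = clip((mean_batch - mean_running) / std_running, -dmax, dmax)
        # The output below is gamma * (x_hat * r + d) + beta, which keeps the
        # training-time normalization close to the inference-time statistics.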
self.r = xp.clip(self.std / running_sigma,
1.0 / self.rmax, self.rmax)
d = xp.clip(
(mean - self._running_mean) / running_sigma,
-self.dmax, self.dmax)
# Update running statistics:
m = x.size // gamma[expander].size
self._running_mean *= self.decay
adjust = m / max(m - 1., 1.) # unbiased estimation
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self._running_mean += temp_ar
del temp_ar
self._running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self._running_var += temp_ar
del temp_ar
gamma = gamma[expander]
beta = beta[expander]
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
self.x_hat_renorm = self.x_hat * self.r[expander] + d[expander]
y = gamma * self.x_hat_renorm
y += beta
else:
self.x_hat, self.x_hat_renorm, y = cuda.elementwise(
'T x, T mean, T std, T gamma, T beta, T r, T d',
'T x_hat, T x_hat_renorm, T y',
'''
x_hat = (x - mean) / std;
x_hat_renorm = x_hat * r + d;
y = gamma * x_hat_renorm + beta;
''',
'bn_fwd')(x, mean[expander], self.std[expander], gamma,
beta, self.r[expander], d[expander])
return y,
def backward(self, inputs, grad_outputs):
x, gamma, _ = inputs
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = backend.get_array_module(x)
# Note: we must be in train mode.
assert configuration.config.train
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
gbeta = gy.sum(axis=axis)
ggamma = (gy * self.x_hat_renorm).sum(axis=axis)
gsigma_batch = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
scale = (self.r * gamma / self.std)[expander]
gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +
gbeta[expander]) / m)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, T x_hat, T gamma, T std, T gsigma_batch, T gbeta, \
T inv_m, T r',
'T gx',
'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \
inv_m)',
'bn_bwd')(gy, self.x_hat, gamma[expander],
self.std[expander], gsigma_batch[expander],
gbeta[expander], inv_m, self.r[expander])
return gx, ggamma, gbeta
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
    with the `update_statistics=True` option.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
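# Illustrative usage sketch; `x`, `gamma`, `beta`, and `channels` are assumed
# to be defined by the caller:
#
#   running_mean = numpy.zeros(channels, dtype=numpy.float32)
#   running_var = numpy.ones(channels, dtype=numpy.float32)
#   y = batch_renormalization(x, gamma, beta, rmax=3, dmax=5,
#                             running_mean=running_mean,
#                             running_var=running_var,
#                             update_statistics=True)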
def fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):
warnings.warn(
'fixed_batch_renormalization is deprecated. '
'Use fixed_batch_normalization instead.',
DeprecationWarning)
with configuration.using_config('train', False):
return batch_normalization.fixed_batch_normalization(
x, gamma, beta, mean, var, eps
)
| {
"content_hash": "ecf07ea7d972648707eab2f356c34a08",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 82,
"avg_line_length": 36.84158415841584,
"alnum_prop": 0.5704111797903789,
"repo_name": "jnishi/chainer",
"id": "701ded49a9def1494b6f27b91d230ca5138dc869",
"size": "7442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/functions/normalization/batch_renormalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1460543"
},
{
"name": "CMake",
"bytes": "42279"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5121452"
},
{
"name": "Shell",
"bytes": "22130"
}
],
"symlink_target": ""
} |
import datetime
from django.conf import settings
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import force_unicode
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
class YearMixin(object):
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"Return the year for which this view should display data"
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_(u"No year specified"))
return year
class MonthMixin(object):
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"Return the month for which this view should display data"
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_(u"No month specified"))
return month
def get_next_month(self, date):
"""
Get the next valid month.
"""
# next must be the first day of the next month.
if date.month == 12:
next = date.replace(year=date.year + 1, month=1, day=1)
else:
next = date.replace(month=date.month + 1, day=1)
return _get_next_prev(self, next, is_previous=False, period='month')
def get_previous_month(self, date):
"""
Get the previous valid month.
"""
# prev must be the last day of the previous month.
prev = date.replace(day=1) - datetime.timedelta(days=1)
return _get_next_prev(self, prev, is_previous=True, period='month')
class DayMixin(object):
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"Return the day for which this view should display data"
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_(u"No day specified"))
return day
def get_next_day(self, date):
"""
Get the next valid day.
"""
next = date + datetime.timedelta(days=1)
return _get_next_prev(self, next, is_previous=False, period='day')
def get_previous_day(self, date):
"""
Get the previous valid day.
"""
prev = date - datetime.timedelta(days=1)
return _get_next_prev(self, prev, is_previous=True, period='day')
class WeekMixin(object):
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"Return the week for which this view should display data"
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_(u"No week specified"))
return week
def get_next_week(self, date):
"""
Get the next valid week.
"""
# next must be the first day of the next week.
next = date + datetime.timedelta(days=7 - self._get_weekday(date))
return _get_next_prev(self, next, is_previous=False, period='week')
def get_previous_week(self, date):
"""
Get the previous valid week.
"""
# prev must be the last day of the previous week.
prev = date - datetime.timedelta(days=self._get_weekday(date) + 1)
return _get_next_prev(self, prev, is_previous=True, period='week')
def _get_weekday(self, date):
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
class DateMixin(object):
"""
Mixin class for views manipulating date-based data.
"""
date_field = None
allow_future = False
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured(u"%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""
    Abstract base class for date-based views displaying a list of objects.
"""
allow_empty = False
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(object_list=self.object_list,
date_list=self.date_list)
context.update(extra_context)
return self.render_to_response(context)
def get_dated_items(self):
"""
Obtain the list of dates and items.
"""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_dated_queryset(self, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else datetime.date.today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = not bool(qs) if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_(u"No %(verbose_name_plural)s available") % {
'verbose_name_plural': force_unicode(qs.model._meta.verbose_name_plural)
})
return qs
def get_date_list(self, queryset, date_type):
"""
Get a date list by calling `queryset.dates()`, checking along the way
for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
date_list = queryset.dates(date_field, date_type)[::-1]
if date_list is not None and not date_list and not allow_empty:
name = force_unicode(queryset.model._meta.verbose_name_plural)
raise Http404(_(u"No %(verbose_name_plural)s available") %
{'verbose_name_plural': name})
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items.
Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, 'year')
if date_list:
object_list = qs.order_by('-' + self.get_date_field())
else:
object_list = qs.none()
return (date_list, object_list, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""
Top-level archive of date-based items.
"""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""
List of objects published in a given year.
"""
make_object_list = False
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(datetime.date(date.year + 1, 1, 1))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs, 'month')
if self.get_make_object_list():
object_list = qs.order_by('-' + date_field)
else:
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
object_list = qs.none()
return (date_list, object_list, {'year': year})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""
List of objects published in a given year.
"""
template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""
    List of objects published in a given month.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
# Construct a date-range lookup.
since = self._make_date_lookup_arg(date)
if date.month == 12:
until = self._make_date_lookup_arg(datetime.date(date.year + 1, 1, 1))
else:
until = self._make_date_lookup_arg(datetime.date(date.year, date.month + 1, 1))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs, 'day')
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""
    List of objects published in a given month.
"""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""
List of objects published in a given week.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
# Construct a date-range lookup.
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=7))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""
List of objects published in a given week.
"""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""
List of objects published on a given day.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""
List of objects published on a given day.
"""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""
List of objects published today.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""
List of objects published today.
"""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""
Get the object this request displays.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = queryset or self.get_queryset()
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_(u"Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
})
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and day (only year is mandatory). Raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_(u"Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
def _get_next_prev(generic_view, naive_result, is_previous, period):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles both next and previous months
and days (for MonthArchiveView and DayArchiveView); hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day or month,
      regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive month
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
# If allow_empty is True the naive value will be valid
if allow_empty:
result = naive_result
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lte' % date_field: generic_view._make_date_lookup_arg(naive_result)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(naive_result)}
ordering = date_field
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
result = None
    # Convert datetimes to dates
if result and generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
if result:
if period == 'month':
# first day of the month
result = result.replace(day=1)
elif period == 'week':
# monday of the week
result = result - datetime.timedelta(days=generic_view._get_weekday(result))
elif period != 'day':
raise ValueError('invalid period: %s' % period)
# Check against future dates.
if result and (allow_future or result < datetime.date.today()):
return result
else:
return None
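# --- Usage sketch (illustrative, not part of this module) -------------------
# Date-based generic views are typically wired up in a project's URLconf.
# ``Article``, ``pub_date`` and the URL pattern below are hypothetical:
#
#     from django.conf.urls import patterns, url
#     from django.views.generic.dates import MonthArchiveView
#     from myapp.models import Article
#
#     urlpatterns = patterns('',
#         url(r'^archive/(?P<year>\d{4})/(?P<month>\w{3})/$',
#             MonthArchiveView.as_view(model=Article, date_field='pub_date'),
#             name='archive_month'),
#     )
#
# The view resolves ``year``/``month`` from the URL kwargs (see YearMixin and
# MonthMixin above) and renders ``<app_label>/<model>_archive_month.html``
# with ``object_list``, ``date_list`` and the next/previous month dates.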
| {
"content_hash": "da29a3361b12ec19917fdde0fa7770d8",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 126,
"avg_line_length": 34.090225563909776,
"alnum_prop": 0.5911777679752978,
"repo_name": "mitar/django",
"id": "6964624516dc97b80f350ed8ee60f39411c1056b",
"size": "22670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/views/generic/dates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49825"
},
{
"name": "JavaScript",
"bytes": "89027"
},
{
"name": "Python",
"bytes": "8055913"
},
{
"name": "Shell",
"bytes": "11901"
}
],
"symlink_target": ""
} |
from axiom.scripts.axiomatic import AxiomaticCommand, AxiomaticSubCommand
from twisted.python.filepath import FilePath
from jumpmaplist.items import Author
def steamidTo64(steamid):
steamid = steamid[8:]
y, z = map(int, steamid.split(':'))
return str(z * 2 + y + 76561197960265728)
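# Worked example (hypothetical SteamID, shown for illustration only):
#   steamidTo64("STEAM_0:1:4491990")
#   -> steamid[8:] == "1:4491990", so y == 1 and z == 4491990
#   -> str(4491990 * 2 + 1 + 76561197960265728) == "76561197969249709"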
class AddSuperuser(AxiomaticSubCommand):
longdesc = """
Add a user by steamID who will be able to add or remove other users from
the web interface.
"""
optParameters = (
[ ['steamid', 's', None, 'SteamID to add as a superuser.']
])
def postOptions(self):
store = self.parent.getStore()
steamID = int(self.decodeCommandLine(self['steamid']))
addUser(store, steamID, superuser=True)
class ImportAuthors(AxiomaticSubCommand):
longdesc = """
Import authors from CSV format.
"""
optParameters = (
[ ['file', 'f', None, 'Path to file.']
])
def postOptions(self):
store = self.parent.getStore()
path = self.decodeCommandLine(self['file'])
content = FilePath(path).getContent()
for line in content.strip().split('\n'):
steamID, name = line.split(',', 1)
Author(store=store, name=name.decode('utf8'), steamID=int(steamidTo64(steamID)))
class MapListCmd(AxiomaticCommand):
name = 'maplist'
description = 'Utilities for interacting with the jump map list database.'
subCommands = (
[ ('add-superuser', None, AddSuperuser, 'Add a superuser.')
, ('import-authors', None, ImportAuthors, 'Import authors.')
])
def getStore(self):
return self.parent.getStore()
| {
"content_hash": "478d0302f11ce1643dd0e3867eb7975d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 92,
"avg_line_length": 27.396825396825395,
"alnum_prop": 0.6349942062572422,
"repo_name": "jsza/jump-map-list",
"id": "271324e4c60ee078a0e0e0caa2b1ede21cca43bc",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axiom/plugins/maplistcmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2598"
},
{
"name": "HTML",
"bytes": "2412"
},
{
"name": "JavaScript",
"bytes": "67420"
},
{
"name": "Python",
"bytes": "52010"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. CC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CC = PhoneMetadata(id='CC', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[01]\\d{2}', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='000|112', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='000|112', example_number='112', possible_length=(3,)),
short_data=True)
| {
"content_hash": "df5fff5135951d9cd794547806550616",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 110,
"avg_line_length": 69.875,
"alnum_prop": 0.7477638640429338,
"repo_name": "gencer/python-phonenumbers",
"id": "ade5c03ba59deb55e21374959638834c8d2a5d02",
"size": "559",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/shortdata/region_CC.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23039485"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._activity_operations import build_get_request, build_list_by_module_request
from .._vendor import MixinABC
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ActivityOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.automation.aio.AutomationClient`'s
:attr:`activity` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self,
resource_group_name: str,
automation_account_name: str,
module_name: str,
activity_name: str,
**kwargs: Any
) -> _models.Activity:
"""Retrieve the activity in the module identified by module name and activity name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param module_name: The name of module.
:type module_name: str
:param activity_name: The name of activity.
:type activity_name: str
:keyword api_version: Api Version. Default value is "2020-01-13-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Activity, or the result of cls(response)
:rtype: ~azure.mgmt.automation.models.Activity
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2020-01-13-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.Activity]
request = build_get_request(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
module_name=module_name,
activity_name=activity_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Activity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/modules/{moduleName}/activities/{activityName}"} # type: ignore
@distributed_trace
def list_by_module(
self,
resource_group_name: str,
automation_account_name: str,
module_name: str,
**kwargs: Any
) -> AsyncIterable[_models.ActivityListResult]:
"""Retrieve a list of activities in the module identified by module name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param module_name: The name of module.
:type module_name: str
:keyword api_version: Api Version. Default value is "2020-01-13-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActivityListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.automation.models.ActivityListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2020-01-13-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ActivityListResult]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_module_request(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
module_name=module_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_module.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_module_request(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
module_name=module_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ActivityListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_module.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/modules/{moduleName}/activities"} # type: ignore
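    # --- Usage sketch (illustrative, not part of the generated client) ------
    # These operations are reached through the async AutomationClient's
    # ``activity`` attribute; the resource names below are placeholders:
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.automation.aio import AutomationClient
    #
    #     async with AutomationClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #         activity = await client.activity.get(
    #             "my-resource-group", "my-automation-account", "my-module", "my-activity")
    #         async for item in client.activity.list_by_module(
    #                 "my-resource-group", "my-automation-account", "my-module"):
    #             print(item.name)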
| {
"content_hash": "0e104fa91da177e28364048143aba917",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 233,
"avg_line_length": 45.11848341232228,
"alnum_prop": 0.6376050420168067,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a5177e26ee70e6db2a7c27a03a36337d71594c10",
"size": "10020",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/automation/azure-mgmt-automation/azure/mgmt/automation/aio/operations/_activity_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
This script collects statistics about userscripts usage in Russian Wikipedia and
publishes it at [[Участник:NapalmBot/Самые используемые скрипты]]. Script can
detect only importScript functions and do not count cross-wiki script imports.
"""
import re
from collections import Counter
import pywikibot
def ucfirst(string):
"""Return string with first letter in upper case."""
if len(string) < 2:
return string.upper()
else:
return string[:1].upper() + string[1:]
def unificate_link(link):
"""Remove "user:" prefix, deal with trailing spaces and underscores."""
(pagename, prefix) = re.subn(r"^ *(?:[Уу]|[Уу]частник|[Уу]частница|[Uu]|[Uu]ser) *:", "", link)
if not prefix:
return None
return ucfirst(re.sub(" ", "_", pagename).strip("_"))
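# Worked examples (illustrative):
#   unificate_link("user:Example user/common.js") -> "Example_user/common.js"
#   unificate_link("MediaWiki:Gadget-foo.js")     -> None  (no user-namespace prefix)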
def process_page(page):
"""Analyze all importScript functions and return a list of used scripts."""
title = r"^[^/]+/(common|vector|cologneblue|minerva|modern|monobook|timeless)\.js$"
comments = r"//.+|/\*(?:.|\n)*?\*/"
scripts = r"importScript *\( *([\"'])([^\"'\n]*?)\1(?: *, *[\"']ru[\"'])? *\)"
if not re.match(title, page.title()):
return []
text = page.text
text = re.sub(comments, "", text)
result = []
for quote, link in re.findall(scripts, text):
link = unificate_link(link)
if link:
result.append(link)
return result
def get_stats(site):
"""Get an { script : count } dictionary."""
result = []
for page in site.search("insource:\"importScript\"", [2], content=True):
result += process_page(page)
return dict(Counter(result))
def main():
"""Main script function."""
site = pywikibot.Site()
stats = get_stats(site)
result = "Последнее обновление: {{subst:#time:j xg Y, H:i}}\n\n"
result += "{| class=\"wikitable sortable\"\n"
result += "! Место !! Скрипт !! Использований\n"
formatstr = "|-\n| {num} || [[Участник:{page}]] || [https://ru.wikipedia.org/w/index.php?search=insource%3A%22{page}%22&ns2=1 {count}]\n"
for num, page in enumerate(sorted(sorted(stats), key=stats.get, reverse=True)):
count = stats[page]
result += formatstr.format(num=num + 1, page=page, count=count)
result += "|}\n\n"
page = pywikibot.Page(site, "Участник:NapalmBot/Самые используемые скрипты")
page.text = result
page.save("Обновление данных.", minor=False)
if __name__ == "__main__":
main()
| {
"content_hash": "8450478fb2540b7914cde485c0f8e14d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 141,
"avg_line_length": 34.54929577464789,
"alnum_prop": 0.6159804321239298,
"repo_name": "Facenapalm/NapalmBot",
"id": "d5750e482e928749de613ec8f3ed604db837db9f",
"size": "2605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/userscripts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169270"
}
],
"symlink_target": ""
} |
"""
First test... just import something...
"""
import pytest
import sys
from pyhaystack.client.niagara import Niagara4HaystackSession
@pytest.mark.skipif(sys.version_info < (3, 4), reason="requires python3 or higher")
def test_conversion_of_str():
unescape = Niagara4HaystackSession.unescape
dct = {
"H.Client.Labo~2f227~2d2~2fBA~2fPC_D~e9bit_Alim": "H.Client.Labo/227-2/BA/PC_Débit_Alim"
}
for k, v in dct.items():
assert unescape(k) == v
| {
"content_hash": "3217e2e857cc301b888fc9f0a2134ad3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 96,
"avg_line_length": 27.941176470588236,
"alnum_prop": 0.6863157894736842,
"repo_name": "ChristianTremblay/pyhaystack",
"id": "ccc0dbbabb39d3236cf3c2a2f8981da2984bb148",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_niagara_escape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "209884"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
} |
import sys
from flask import Flask, request
import telepot
from telepot.loop import OrderedWebhook
"""
$ python2.7 flask_skeleton.py <token> <listening_port> <webhook_url>
Webhook path is '/webhook', therefore:
<webhook_url>: https://<base>/webhook
"""
def on_chat_message(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print('Chat Message:', content_type, chat_type, chat_id)
def on_callback_query(msg):
query_id, from_id, data = telepot.glance(msg, flavor='callback_query')
print('Callback query:', query_id, from_id, data)
# need `/setinline`
def on_inline_query(msg):
query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
print('Inline Query:', query_id, from_id, query_string)
# Compose your own answers
articles = [{'type': 'article',
'id': 'abc', 'title': 'ABC', 'message_text': 'Good morning'}]
bot.answerInlineQuery(query_id, articles)
# need `/setinlinefeedback`
def on_chosen_inline_result(msg):
result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')
print('Chosen Inline Result:', result_id, from_id, query_string)
TOKEN = sys.argv[1]
PORT = int(sys.argv[2])
URL = sys.argv[3]
app = Flask(__name__)
bot = telepot.Bot(TOKEN)
webhook = OrderedWebhook(bot, {'chat': on_chat_message,
'callback_query': on_callback_query,
'inline_query': on_inline_query,
'chosen_inline_result': on_chosen_inline_result})
@app.route('/webhook', methods=['GET', 'POST'])
def pass_update():
webhook.feed(request.data)
return 'OK'
if __name__ == '__main__':
try:
bot.setWebhook(URL)
        # This call sometimes raises the error below even though the webhook is set successfully.
except telepot.exception.TooManyRequestsError:
pass
webhook.run_as_thread()
app.run(port=PORT, debug=True)
| {
"content_hash": "0713fbbae365d22f09386c0c09b11643",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 89,
"avg_line_length": 30.73015873015873,
"alnum_prop": 0.643595041322314,
"repo_name": "nickoala/telepot",
"id": "4b9bbfc1afb50e4c7947e8979267d27174e969e2",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/webhook/flask_skeleton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "340232"
}
],
"symlink_target": ""
} |
"""Provides device triggers for Shelly."""
from __future__ import annotations
from typing import Any, Final
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from . import get_device_wrapper
from .const import (
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
CONF_SUBTYPE,
DOMAIN,
EVENT_SHELLY_CLICK,
INPUTS_EVENTS_SUBTYPES,
SHBTN_INPUTS_EVENTS_TYPES,
SHBTN_MODELS,
SUPPORTED_INPUTS_EVENTS_TYPES,
)
from .utils import get_input_triggers
TRIGGER_SCHEMA: Final = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(SUPPORTED_INPUTS_EVENTS_TYPES),
vol.Required(CONF_SUBTYPE): vol.In(INPUTS_EVENTS_SUBTYPES),
}
)
async def async_validate_trigger_config(
hass: HomeAssistant, config: dict[str, Any]
) -> dict[str, Any]:
"""Validate config."""
config = TRIGGER_SCHEMA(config)
# if device is available verify parameters against device capabilities
wrapper = get_device_wrapper(hass, config[CONF_DEVICE_ID])
if not wrapper or not wrapper.device.initialized:
return config
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
for block in wrapper.device.blocks:
input_triggers = get_input_triggers(wrapper.device, block)
if trigger in input_triggers:
return config
raise InvalidDeviceAutomationConfig(
f"Invalid ({CONF_TYPE},{CONF_SUBTYPE}): {trigger}"
)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
"""List device triggers for Shelly devices."""
triggers = []
wrapper = get_device_wrapper(hass, device_id)
if not wrapper:
raise InvalidDeviceAutomationConfig(f"Device not found: {device_id}")
if wrapper.model in SHBTN_MODELS:
for trigger in SHBTN_INPUTS_EVENTS_TYPES:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: "button",
}
)
return triggers
for block in wrapper.device.blocks:
input_triggers = get_input_triggers(wrapper.device, block)
for trigger, subtype in input_triggers:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
event_config = {
event_trigger.CONF_PLATFORM: CONF_EVENT,
event_trigger.CONF_EVENT_TYPE: EVENT_SHELLY_CLICK,
event_trigger.CONF_EVENT_DATA: {
ATTR_DEVICE_ID: config[CONF_DEVICE_ID],
ATTR_CHANNEL: INPUTS_EVENTS_SUBTYPES[config[CONF_SUBTYPE]],
ATTR_CLICK_TYPE: config[CONF_TYPE],
},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
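# --- Example (illustrative) --------------------------------------------------
# A device trigger validated by TRIGGER_SCHEMA above looks roughly like this;
# the device id and the chosen type/subtype values are placeholders:
#
#     {
#         CONF_PLATFORM: "device",
#         CONF_DOMAIN: DOMAIN,
#         CONF_DEVICE_ID: "abc123deadbeef",
#         CONF_TYPE: "single",        # must be in SUPPORTED_INPUTS_EVENTS_TYPES
#         CONF_SUBTYPE: "button1",    # must be in INPUTS_EVENTS_SUBTYPES
#     }
#
# async_attach_trigger converts such a config into an event trigger listening
# for EVENT_SHELLY_CLICK events carrying the matching channel and click type.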
| {
"content_hash": "3747c088e6b584bebfdca9dd3bb0ae3c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 82,
"avg_line_length": 30.075757575757574,
"alnum_prop": 0.646095717884131,
"repo_name": "Danielhiversen/home-assistant",
"id": "eae2953e5b82b7cf6b169707d92e0ae44810de4b",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/shelly/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import urllib
import datetime
import github.GithubObject
import github.PaginatedList
import github.Branch
import github.IssueEvent
import github.ContentFile
import github.Label
import github.GitBlob
import github.Organization
import github.GitRef
import github.Issue
import github.Repository
import github.PullRequest
import github.RepositoryKey
import github.NamedUser
import github.Milestone
import github.Comparison
import github.CommitComment
import github.GitCommit
import github.Team
import github.Commit
import github.GitTree
import github.Hook
import github.Tag
import github.GitTag
import github.Download
import github.Permissions
import github.Event
import github.Legacy
import github.StatsContributor
import github.StatsCommitActivity
import github.StatsCodeFrequency
import github.StatsParticipation
import github.StatsPunchCard
class Repository(github.GithubObject.CompletableGithubObject):
"""
    This class represents Repositories. The reference can be found here http://developer.github.com/v3/repos/
"""
@property
def archive_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._archive_url)
return self._archive_url.value
@property
def assignees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._assignees_url)
return self._assignees_url.value
@property
def blobs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._blobs_url)
return self._blobs_url.value
@property
def branches_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._branches_url)
return self._branches_url.value
@property
def clone_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._clone_url)
return self._clone_url.value
@property
def collaborators_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._collaborators_url)
return self._collaborators_url.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def compare_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._compare_url)
return self._compare_url.value
@property
def contents_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contents_url)
return self._contents_url.value
@property
def contributors_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contributors_url)
return self._contributors_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def default_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._default_branch)
return self._default_branch.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def downloads_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._downloads_url)
return self._downloads_url.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def fork(self):
"""
:type: bool
"""
self._completeIfNotSet(self._fork)
return self._fork.value
@property
def forks(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks_count)
return self._forks_count.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def full_name(self):
"""
:type: string
"""
self._completeIfNotSet(self._full_name)
return self._full_name.value
@property
def git_commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_commits_url)
return self._git_commits_url.value
@property
def git_refs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_refs_url)
return self._git_refs_url.value
@property
def git_tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_tags_url)
return self._git_tags_url.value
@property
def git_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_url)
return self._git_url.value
@property
def has_downloads(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_downloads)
return self._has_downloads.value
@property
def has_issues(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_issues)
return self._has_issues.value
@property
def has_wiki(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_wiki)
return self._has_wiki.value
@property
def homepage(self):
"""
:type: string
"""
self._completeIfNotSet(self._homepage)
return self._homepage.value
@property
def hooks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._hooks_url)
return self._hooks_url.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue_comment_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_comment_url)
return self._issue_comment_url.value
@property
def issue_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_events_url)
return self._issue_events_url.value
@property
def issues_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issues_url)
return self._issues_url.value
@property
def keys_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._keys_url)
return self._keys_url.value
@property
def labels_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def language(self):
"""
:type: string
"""
self._completeIfNotSet(self._language)
return self._language.value
@property
def languages_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._languages_url)
return self._languages_url.value
@property
def master_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._master_branch)
return self._master_branch.value
@property
def merges_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._merges_url)
return self._merges_url.value
@property
def milestones_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._milestones_url)
return self._milestones_url.value
@property
def mirror_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._mirror_url)
return self._mirror_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def network_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._network_count)
return self._network_count.value
@property
def notifications_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._notifications_url)
return self._notifications_url.value
@property
def open_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def open_issues_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues_count)
return self._open_issues_count.value
@property
def organization(self):
"""
:type: :class:`github.Organization.Organization`
"""
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def parent(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._parent)
return self._parent.value
@property
def permissions(self):
"""
:type: :class:`github.Permissions.Permissions`
"""
self._completeIfNotSet(self._permissions)
return self._permissions.value
@property
def private(self):
"""
:type: bool
"""
self._completeIfNotSet(self._private)
return self._private.value
@property
def pulls_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._pulls_url)
return self._pulls_url.value
@property
def pushed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._pushed_at)
return self._pushed_at.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def source(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._source)
return self._source.value
@property
def ssh_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._ssh_url)
return self._ssh_url.value
@property
def stargazers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._stargazers_count) # pragma no cover (Should be covered)
return self._stargazers_count.value # pragma no cover (Should be covered)
@property
def stargazers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._stargazers_url)
return self._stargazers_url.value
@property
def statuses_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._statuses_url)
return self._statuses_url.value
@property
def subscribers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscribers_url)
return self._subscribers_url.value
@property
def subscription_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscription_url)
return self._subscription_url.value
@property
def svn_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._svn_url)
return self._svn_url.value
@property
def tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._tags_url)
return self._tags_url.value
@property
def teams_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._teams_url)
return self._teams_url.value
@property
def trees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._trees_url)
return self._trees_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def watchers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers)
return self._watchers.value
@property
def watchers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers_count)
return self._watchers_count.value
def add_to_collaborators(self, collaborator):
"""
:calls: `PUT /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/collaborators/" + collaborator
)
def compare(self, base, head):
"""
:calls: `GET /repos/:owner/:repo/compare/:base...:head <http://developer.github.com/v3/repos/commits>`_
:param base: string
:param head: string
:rtype: :class:`github.Comparison.Comparison`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/compare/" + base + "..." + head
)
return github.Comparison.Comparison(self._requester, headers, data, completed=True)
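    # Usage sketch (illustrative, not part of the class): given a Repository
    # object ``repo`` obtained elsewhere, the comparison above can be used as
    # follows; the branch names are placeholders.
    #
    #     comparison = repo.compare("master", "feature-branch")
    #     print(comparison.ahead_by, comparison.behind_by)
    #     for commit in comparison.commits:
    #         print(commit.sha)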
def create_git_blob(self, content, encoding):
"""
:calls: `POST /repos/:owner/:repo/git/blobs <http://developer.github.com/v3/git/blobs>`_
:param content: string
:param encoding: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(content, (str, unicode)), content
assert isinstance(encoding, (str, unicode)), encoding
post_parameters = {
"content": content,
"encoding": encoding,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/blobs",
input=post_parameters
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def create_git_commit(self, message, tree, parents, author=github.GithubObject.NotSet, committer=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_
:param message: string
:param tree: :class:`github.GitTree.GitTree`
:param parents: list of :class:`github.GitCommit.GitCommit`
:param author: :class:`github.InputGitAuthor.InputGitAuthor`
:param committer: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(message, (str, unicode)), message
assert isinstance(tree, github.GitTree.GitTree), tree
assert all(isinstance(element, github.GitCommit.GitCommit) for element in parents), parents
assert author is github.GithubObject.NotSet or isinstance(author, github.InputGitAuthor), author
assert committer is github.GithubObject.NotSet or isinstance(committer, github.InputGitAuthor), committer
post_parameters = {
"message": message,
"tree": tree._identity,
"parents": [element._identity for element in parents],
}
if author is not github.GithubObject.NotSet:
post_parameters["author"] = author._identity
if committer is not github.GithubObject.NotSet:
post_parameters["committer"] = committer._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/commits",
input=post_parameters
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def create_git_ref(self, ref, sha):
"""
:calls: `POST /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:param ref: string
:param sha: string
:rtype: :class:`github.GitRef.GitRef`
"""
assert isinstance(ref, (str, unicode)), ref
assert isinstance(sha, (str, unicode)), sha
post_parameters = {
"ref": ref,
"sha": sha,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/refs",
input=post_parameters
)
return github.GitRef.GitRef(self._requester, headers, data, completed=True)
def create_git_tag(self, tag, message, object, type, tagger=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/tags <http://developer.github.com/v3/git/tags>`_
:param tag: string
:param message: string
:param object: string
:param type: string
:param tagger: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(tag, (str, unicode)), tag
assert isinstance(message, (str, unicode)), message
assert isinstance(object, (str, unicode)), object
assert isinstance(type, (str, unicode)), type
assert tagger is github.GithubObject.NotSet or isinstance(tagger, github.InputGitAuthor), tagger
post_parameters = {
"tag": tag,
"message": message,
"object": object,
"type": type,
}
if tagger is not github.GithubObject.NotSet:
post_parameters["tagger"] = tagger._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/tags",
input=post_parameters
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def create_git_tree(self, tree, base_tree=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/trees <http://developer.github.com/v3/git/trees>`_
:param tree: list of :class:`github.InputGitTreeElement.InputGitTreeElement`
:param base_tree: :class:`github.GitTree.GitTree`
:rtype: :class:`github.GitTree.GitTree`
"""
assert all(isinstance(element, github.InputGitTreeElement) for element in tree), tree
assert base_tree is github.GithubObject.NotSet or isinstance(base_tree, github.GitTree.GitTree), base_tree
post_parameters = {
"tree": [element._identity for element in tree],
}
if base_tree is not github.GithubObject.NotSet:
post_parameters["base_tree"] = base_tree._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/trees",
input=post_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
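    # Illustrative usage sketch (not part of the original source): the low-level Git
    # data methods above are typically chained to build a commit from scratch.
    # ``repo`` is assumed to be a Repository obtained from an authenticated client;
    # branch and file names below are placeholders.
    #
    #     base_ref = repo.get_git_ref("heads/master")
    #     base_commit = repo.get_git_commit(base_ref.object.sha)
    #     blob = repo.create_git_blob("hello world\n", "utf-8")
    #     element = github.InputGitTreeElement("hello.txt", "100644", "blob", sha=blob.sha)
    #     tree = repo.create_git_tree([element], base_tree=base_commit.tree)
    #     commit = repo.create_git_commit("Add hello.txt", tree, [base_commit])
    #     base_ref.edit(commit.sha)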
def create_hook(self, name, config, events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/hooks",
input=post_parameters
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def create_issue(self, title, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param title: string
:param body: string
:param assignee: string or :class:`github.NamedUser.NamedUser`
:param milestone: :class:`github.Milestone.Milestone`
:param labels: list of :class:`github.Label.Label`
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(title, (str, unicode)), title
assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert milestone is github.GithubObject.NotSet or isinstance(milestone, github.Milestone.Milestone), milestone
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
post_parameters = {
"title": title,
}
if body is not github.GithubObject.NotSet:
post_parameters["body"] = body
if assignee is not github.GithubObject.NotSet:
if isinstance(assignee, (str, unicode)):
post_parameters["assignee"] = assignee
else:
post_parameters["assignee"] = assignee._identity
if milestone is not github.GithubObject.NotSet:
post_parameters["milestone"] = milestone._identity
if labels is not github.GithubObject.NotSet:
post_parameters["labels"] = [element.name for element in labels]
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/issues",
input=post_parameters
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
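    # Illustrative usage sketch (placeholder names, not part of the original source):
    # creating a labelled issue on an authenticated Repository ``repo``.
    #
    #     bug = repo.get_label("bug")
    #     issue = repo.create_issue(
    #         title="Crash on start-up",
    #         body="Steps to reproduce ...",
    #         labels=[bug],
    #     )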
def create_key(self, title, key):
"""
:calls: `POST /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/keys",
input=post_parameters
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def create_label(self, name, color):
"""
:calls: `POST /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:param name: string
:param color: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(color, (str, unicode)), color
post_parameters = {
"name": name,
"color": color,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/labels",
input=post_parameters
)
return github.Label.Label(self._requester, headers, data, completed=True)
def create_milestone(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param title: string
:param state: string
:param description: string
:param due_on: date
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(title, (str, unicode)), title
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on
post_parameters = {
"title": title,
}
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if due_on is not github.GithubObject.NotSet:
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/milestones",
input=post_parameters
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def create_pull(self, *args, **kwds):
"""
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param issue: :class:`github.Issue.Issue`
:param base: string
:param head: string
:rtype: :class:`github.PullRequest.PullRequest`
"""
if len(args) + len(kwds) == 4:
return self.__create_pull_1(*args, **kwds)
else:
return self.__create_pull_2(*args, **kwds)
def __create_pull_1(self, title, body, base, head):
assert isinstance(title, (str, unicode)), title
assert isinstance(body, (str, unicode)), body
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(title=title, body=body, base=base, head=head)
def __create_pull_2(self, issue, base, head):
assert isinstance(issue, github.Issue.Issue), issue
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(issue=issue._identity, base=base, head=head)
def __create_pull(self, **kwds):
post_parameters = kwds
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/pulls",
input=post_parameters
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
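    # Illustrative usage sketch (placeholder names): ``create_pull`` dispatches on the
    # number of arguments, so both calling conventions below reach the same endpoint.
    #
    #     pr = repo.create_pull(title="Fix typo", body="", base="master", head="feature")
    #     pr_from_issue = repo.create_pull(issue=existing_issue, base="master", head="feature")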
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, default_branch=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param default_branch: string
:rtype: None
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert default_branch is github.GithubObject.NotSet or isinstance(default_branch, (str, unicode)), default_branch
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if default_branch is not github.GithubObject.NotSet:
post_parameters["default_branch"] = default_branch
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_archive_link(self, archive_format, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/:archive_format/:ref <http://developer.github.com/v3/repos/contents>`_
:param archive_format: string
:param ref: string
:rtype: string
"""
assert isinstance(archive_format, (str, unicode)), archive_format
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url = self.url + "/" + archive_format
if ref is not github.GithubObject.NotSet:
url += "/" + ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
url
)
return headers["location"]
def get_assignees(self):
"""
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/assignees",
None
)
def get_branch(self, branch):
"""
:calls: `GET /repos/:owner/:repo/branches/:branch <http://developer.github.com/v3/repos>`_
:param branch: string
:rtype: :class:`github.Branch.Branch`
"""
assert isinstance(branch, (str, unicode)), branch
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/branches/" + branch
)
return github.Branch.Branch(self._requester, headers, data, completed=True)
def get_branches(self):
"""
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
"""
return github.PaginatedList.PaginatedList(
github.Branch.Branch,
self._requester,
self.url + "/branches",
None
)
def get_collaborators(self):
"""
:calls: `GET /repos/:owner/:repo/collaborators <http://developer.github.com/v3/repos/collaborators>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/collaborators",
None
)
def get_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/comments/:id <http://developer.github.com/v3/repos/comments>`_
:param id: integer
:rtype: :class:`github.CommitComment.CommitComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/comments/" + str(id)
)
return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
)
def get_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/commits/:sha <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/commits/" + sha
)
return github.Commit.Commit(self._requester, headers, data, completed=True)
def get_commits(self, sha=github.GithubObject.NotSet, path=github.GithubObject.NotSet, since=github.GithubObject.NotSet, until=github.GithubObject.NotSet, author=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/commits <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:param path: string
:param since: datetime.datetime
:param until: datetime.datetime
:param author: string or :class:`github.NamedUser.NamedUser` or :class:`github.AuthenticatedUser.AuthenticatedUser`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert until is github.GithubObject.NotSet or isinstance(until, datetime.datetime), until
assert author is github.GithubObject.NotSet or isinstance(author, (str, unicode, github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)), author
url_parameters = dict()
if sha is not github.GithubObject.NotSet:
url_parameters["sha"] = sha
if path is not github.GithubObject.NotSet:
url_parameters["path"] = path
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if until is not github.GithubObject.NotSet:
url_parameters["until"] = until.strftime("%Y-%m-%dT%H:%M:%SZ")
if author is not github.GithubObject.NotSet:
if isinstance(author, (github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)):
url_parameters["author"] = author.login
else:
url_parameters["author"] = author
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self._requester,
self.url + "/commits",
url_parameters
)
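    # Illustrative usage sketch (placeholder names): commits touching a path by a
    # given author since a date; iterating the PaginatedList fetches pages lazily.
    #
    #     import datetime
    #     for commit in repo.get_commits(path="README.md", author="octocat",
    #                                    since=datetime.datetime(2015, 1, 1)):
    #         print(commit.sha, commit.commit.message)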
def get_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
return self.get_file_contents(path, ref)
def get_file_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert isinstance(path, (str, unicode)), path
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/contents" + path,
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
def get_dir_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: list of :class:`github.ContentFile.ContentFile`
"""
assert isinstance(path, (str, unicode)), path
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/contents" + path,
parameters=url_parameters
)
# Handle 302 redirect response
if headers.get('status') == '302 Found' and headers.get('location'):
headers, data = self._requester.requestJsonAndCheck(
"GET",
headers['location'],
parameters=url_parameters
)
return [
github.ContentFile.ContentFile(self._requester, headers, attributes, completed=(attributes["type"] != "file")) # Lazy completion only makes sense for files. See discussion here: https://github.com/jacquev6/PyGithub/issues/140#issuecomment-13481130
for attributes in data
]
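    # Illustrative usage sketch (placeholder names): walking a repository tree by
    # recursing into directory entries returned by ``get_dir_contents``.
    #
    #     def walk(repo, path="/"):
    #         for entry in repo.get_dir_contents(path):
    #             if entry.type == "dir":
    #                 walk(repo, "/" + entry.path)
    #             else:
    #                 print(entry.path)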
def get_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/contributors <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/contributors",
None
)
def get_download(self, id):
"""
:calls: `GET /repos/:owner/:repo/downloads/:id <http://developer.github.com/v3/repos/downloads>`_
:param id: integer
:rtype: :class:`github.Download.Download`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/downloads/" + str(id)
)
return github.Download.Download(self._requester, headers, data, completed=True)
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
)
def get_events(self):
"""
:calls: `GET /repos/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_forks(self):
"""
:calls: `GET /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
Repository,
self._requester,
self.url + "/forks",
None
)
def get_git_blob(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/blobs/:sha <http://developer.github.com/v3/git/blobs>`_
:param sha: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/blobs/" + sha
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def get_git_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/commits/:sha <http://developer.github.com/v3/git/commits>`_
:param sha: string
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/commits/" + sha
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def get_git_ref(self, ref):
"""
:calls: `GET /repos/:owner/:repo/git/refs/:ref <http://developer.github.com/v3/git/refs>`_
:param ref: string
:rtype: :class:`github.GitRef.GitRef`
"""
prefix = "/git/refs/"
if not self._requester.FIX_REPO_GET_GIT_REF:
prefix = "/git/"
assert isinstance(ref, (str, unicode)), ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + prefix + ref
)
return github.GitRef.GitRef(self._requester, headers, data, completed=True)
def get_git_refs(self):
"""
:calls: `GET /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRef.GitRef`
"""
return github.PaginatedList.PaginatedList(
github.GitRef.GitRef,
self._requester,
self.url + "/git/refs",
None
)
def get_git_tag(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/tags/:sha <http://developer.github.com/v3/git/tags>`_
:param sha: string
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/tags/" + sha
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def get_git_tree(self, sha, recursive=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/git/trees/:sha <http://developer.github.com/v3/git/trees>`_
:param sha: string
:param recursive: bool
:rtype: :class:`github.GitTree.GitTree`
"""
assert isinstance(sha, (str, unicode)), sha
assert recursive is github.GithubObject.NotSet or isinstance(recursive, bool), recursive
url_parameters = dict()
if recursive is not github.GithubObject.NotSet:
url_parameters["recursive"] = recursive
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/trees/" + sha,
parameters=url_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
def get_hook(self, id):
"""
:calls: `GET /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param id: integer
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/hooks/" + str(id)
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_hooks(self):
"""
:calls: `GET /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Hook.Hook`
"""
return github.PaginatedList.PaginatedList(
github.Hook.Hook,
self._requester,
self.url + "/hooks",
None
)
def get_issue(self, number):
"""
:calls: `GET /repos/:owner/:repo/issues/:number <http://developer.github.com/v3/issues>`_
:param number: integer
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/" + str(number)
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def get_issues(self, milestone=github.GithubObject.NotSet, state=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, mentioned=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param milestone: :class:`github.Milestone.Milestone` or "none" or "*"
:param state: string
:param assignee: string or :class:`github.NamedUser.NamedUser` or "none" or "*"
:param mentioned: :class:`github.NamedUser.NamedUser`
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert milestone is github.GithubObject.NotSet or milestone == "*" or milestone == "none" or isinstance(milestone, github.Milestone.Milestone), milestone
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert mentioned is github.GithubObject.NotSet or isinstance(mentioned, github.NamedUser.NamedUser), mentioned
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if milestone is not github.GithubObject.NotSet:
            if isinstance(milestone, (str, unicode)):
url_parameters["milestone"] = milestone
else:
url_parameters["milestone"] = milestone._identity
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if assignee is not github.GithubObject.NotSet:
            if isinstance(assignee, (str, unicode)):
url_parameters["assignee"] = assignee
else:
url_parameters["assignee"] = assignee._identity
if mentioned is not github.GithubObject.NotSet:
url_parameters["mentioned"] = mentioned._identity
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
self.url + "/issues",
url_parameters
)
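    # Illustrative usage sketch (placeholder names): open issues carrying a given
    # label, most recently updated first.
    #
    #     bug = repo.get_label("bug")
    #     for issue in repo.get_issues(state="open", labels=[bug],
    #                                  sort="updated", direction="desc"):
    #         print(issue.number, issue.title)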
def get_issues_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues/comments <http://developer.github.com/v3/issues/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/issues/comments",
url_parameters
)
def get_issues_event(self, id):
"""
:calls: `GET /repos/:owner/:repo/issues/events/:id <http://developer.github.com/v3/issues/events>`_
:param id: integer
:rtype: :class:`github.IssueEvent.IssueEvent`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/events/" + str(id)
)
return github.IssueEvent.IssueEvent(self._requester, headers, data, completed=True)
def get_issues_events(self):
"""
:calls: `GET /repos/:owner/:repo/issues/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
"""
return github.PaginatedList.PaginatedList(
github.IssueEvent.IssueEvent,
self._requester,
self.url + "/issues/events",
None
)
def get_key(self, id):
"""
:calls: `GET /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
:param id: integer
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/keys/" + str(id)
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def get_keys(self):
"""
:calls: `GET /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.RepositoryKey.RepositoryKey`
"""
return github.PaginatedList.PaginatedList(
lambda requester, headers, data, completed: github.RepositoryKey.RepositoryKey(requester, headers, data, completed, repoUrl=self.url),
self._requester,
self.url + "/keys",
None
)
def get_label(self, name):
"""
:calls: `GET /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_
:param name: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/labels/" + urllib.quote(name)
)
return github.Label.Label(self._requester, headers, data, completed=True)
def get_labels(self):
"""
:calls: `GET /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
"""
return github.PaginatedList.PaginatedList(
github.Label.Label,
self._requester,
self.url + "/labels",
None
)
def get_languages(self):
"""
:calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/languages"
)
return data
def get_milestone(self, number):
"""
:calls: `GET /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:param number: integer
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/milestones/" + str(number)
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def get_milestones(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param state: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Milestone.Milestone`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Milestone.Milestone,
self._requester,
self.url + "/milestones",
url_parameters
)
def get_network_events(self):
"""
:calls: `GET /networks/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/networks/" + self.owner.login + "/" + self.name + "/events",
None
)
def get_pull(self, number):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
:param number: integer
:rtype: :class:`github.PullRequest.PullRequest`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/pulls/" + str(number)
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
def get_pulls(self, state=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param state: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequest.PullRequest`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
return github.PaginatedList.PaginatedList(
github.PullRequest.PullRequest,
self._requester,
self.url + "/pulls",
url_parameters
)
def get_pulls_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return self.get_pulls_review_comments(sort, direction, since)
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
            github.PullRequestComment.PullRequestComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
)
def get_readme(self, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/readme <http://developer.github.com/v3/repos/contents>`_
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/readme",
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
def get_stargazers(self):
"""
:calls: `GET /repos/:owner/:repo/stargazers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/stargazers",
None
)
def get_stats_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/stats/contributors <http://developer.github.com/v3/repos/statistics/#get-contributors-list-with-additions-deletions-and-commit-counts>`_
:rtype: None or list of :class:`github.StatsContributor.StatsContributor`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/contributors"
)
if data == {}:
return None
else:
return [
github.StatsContributor.StatsContributor(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_commit_activity(self):
"""
        :calls: `GET /repos/:owner/:repo/stats/commit_activity <http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day>`_
:rtype: None or list of :class:`github.StatsCommitActivity.StatsCommitActivity`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/commit_activity"
)
if data == {}:
return None
else:
return [
github.StatsCommitActivity.StatsCommitActivity(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_code_frequency(self):
"""
:calls: `GET /repos/:owner/:repo/stats/code_frequency <http://developer.github.com/v3/repos/statistics/#get-the-number-of-additions-and-deletions-per-week>`_
:rtype: None or list of :class:`github.StatsCodeFrequency.StatsCodeFrequency`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/code_frequency"
)
if data == {}:
return None
else:
return [
github.StatsCodeFrequency.StatsCodeFrequency(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_participation(self):
"""
:calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
:rtype: None or :class:`github.StatsParticipation.StatsParticipation`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/participation"
)
if data == {}:
return None
else:
return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True)
def get_stats_punch_card(self):
"""
:calls: `GET /repos/:owner/:repo/stats/punch_card <http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day>`_
:rtype: None or :class:`github.StatsPunchCard.StatsPunchCard`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/punch_card"
)
if data == {}:
return None
else:
return github.StatsPunchCard.StatsPunchCard(self._requester, headers, data, completed=True)
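    # Illustrative usage note (placeholder names): the statistics methods above return
    # ``None`` while GitHub is still computing the data, so callers typically poll
    # with a short delay. A minimal sketch:
    #
    #     import time
    #     stats = repo.get_stats_contributors()
    #     while stats is None:
    #         time.sleep(2)
    #         stats = repo.get_stats_contributors()
    #     for contributor in stats:
    #         print(contributor.author.login, contributor.total)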
def get_subscribers(self):
"""
:calls: `GET /repos/:owner/:repo/subscribers <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/subscribers",
None
)
def get_tags(self):
"""
:calls: `GET /repos/:owner/:repo/tags <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Tag.Tag`
"""
return github.PaginatedList.PaginatedList(
github.Tag.Tag,
self._requester,
self.url + "/tags",
None
)
def get_teams(self):
"""
:calls: `GET /repos/:owner/:repo/teams <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/teams",
None
)
def get_watchers(self):
"""
:calls: `GET /repos/:owner/:repo/watchers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/watchers",
None
)
def has_in_assignees(self, assignee):
"""
:calls: `GET /repos/:owner/:repo/assignees/:assignee <http://developer.github.com/v3/issues/assignees>`_
:param assignee: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
if isinstance(assignee, github.NamedUser.NamedUser):
assignee = assignee._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/assignees/" + assignee
)
return status == 204
def has_in_collaborators(self, collaborator):
"""
:calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/collaborators/" + collaborator
)
return status == 204
def legacy_search_issues(self, state, keyword):
"""
:calls: `GET /legacy/issues/search/:owner/:repository/:state/:keyword <http://developer.github.com/v3/search/legacy>`_
:param state: "open" or "closed"
:param keyword: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert state in ["open", "closed"], state
assert isinstance(keyword, (str, unicode)), keyword
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/legacy/issues/search/" + self.owner.login + "/" + self.name + "/" + state + "/" + urllib.quote(keyword)
)
return [
github.Issue.Issue(self._requester, headers, github.Legacy.convertIssue(element), completed=False)
for element in data["issues"]
]
def merge(self, base, head, commit_message=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/merges <http://developer.github.com/v3/repos/merging>`_
:param base: string
:param head: string
:param commit_message: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
post_parameters = {
"base": base,
"head": head,
}
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/merges",
input=post_parameters
)
if data is None:
return None
else:
return github.Commit.Commit(self._requester, headers, data, completed=True)
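    # Illustrative usage sketch (placeholder names): merging a feature branch into
    # master; ``merge`` returns ``None`` when there is nothing to merge.
    #
    #     result = repo.merge("master", "feature", commit_message="Merge feature")
    #     if result is not None:
    #         print("merged as", result.sha)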
def remove_from_collaborators(self, collaborator):
"""
:calls: `DELETE /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/collaborators/" + collaborator
)
def subscribe_to_hub(self, event, callback, secret=github.GithubObject.NotSet):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:param secret: string
:rtype: None
"""
return self._hub("subscribe", event, callback, secret)
def unsubscribe_from_hub(self, event, callback):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:rtype: None
"""
return self._hub("unsubscribe", event, callback, github.GithubObject.NotSet)
def _hub(self, mode, event, callback, secret):
assert isinstance(mode, (str, unicode)), mode
assert isinstance(event, (str, unicode)), event
assert isinstance(callback, (str, unicode)), callback
assert secret is github.GithubObject.NotSet or isinstance(secret, (str, unicode)), secret
post_parameters = {
"hub.mode": mode,
"hub.topic": "https://github.com/" + self.full_name + "/events/" + event,
"hub.callback": callback,
}
if secret is not github.GithubObject.NotSet:
post_parameters["hub.secret"] = secret
headers, output = self._requester.requestMultipartAndCheck(
"POST",
"/hub",
input=post_parameters
)
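    # Illustrative usage sketch (placeholder names): wiring a callback URL to push
    # events through the PubSubHubbub endpoint wrapped above.
    #
    #     repo.subscribe_to_hub("push", "https://example.com/github-hook", secret="s3cr3t")
    #     repo.unsubscribe_from_hub("push", "https://example.com/github-hook")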
@property
def _identity(self):
return self.owner.login + "/" + self.name
def _initAttributes(self):
self._archive_url = github.GithubObject.NotSet
self._assignees_url = github.GithubObject.NotSet
self._blobs_url = github.GithubObject.NotSet
self._branches_url = github.GithubObject.NotSet
self._clone_url = github.GithubObject.NotSet
self._collaborators_url = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._compare_url = github.GithubObject.NotSet
self._contents_url = github.GithubObject.NotSet
self._contributors_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._default_branch = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._downloads_url = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._fork = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_count = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._full_name = github.GithubObject.NotSet
self._git_commits_url = github.GithubObject.NotSet
self._git_refs_url = github.GithubObject.NotSet
self._git_tags_url = github.GithubObject.NotSet
self._git_url = github.GithubObject.NotSet
self._has_downloads = github.GithubObject.NotSet
self._has_issues = github.GithubObject.NotSet
self._has_wiki = github.GithubObject.NotSet
self._homepage = github.GithubObject.NotSet
self._hooks_url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue_comment_url = github.GithubObject.NotSet
self._issue_events_url = github.GithubObject.NotSet
self._issues_url = github.GithubObject.NotSet
self._keys_url = github.GithubObject.NotSet
self._labels_url = github.GithubObject.NotSet
self._language = github.GithubObject.NotSet
self._languages_url = github.GithubObject.NotSet
self._master_branch = github.GithubObject.NotSet
self._merges_url = github.GithubObject.NotSet
self._milestones_url = github.GithubObject.NotSet
self._mirror_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._network_count = github.GithubObject.NotSet
self._notifications_url = github.GithubObject.NotSet
self._open_issues = github.GithubObject.NotSet
self._open_issues_count = github.GithubObject.NotSet
self._organization = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._parent = github.GithubObject.NotSet
self._permissions = github.GithubObject.NotSet
self._private = github.GithubObject.NotSet
self._pulls_url = github.GithubObject.NotSet
self._pushed_at = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._source = github.GithubObject.NotSet
self._ssh_url = github.GithubObject.NotSet
self._stargazers_count = github.GithubObject.NotSet
self._stargazers_url = github.GithubObject.NotSet
self._statuses_url = github.GithubObject.NotSet
self._subscribers_url = github.GithubObject.NotSet
self._subscription_url = github.GithubObject.NotSet
self._svn_url = github.GithubObject.NotSet
self._tags_url = github.GithubObject.NotSet
self._teams_url = github.GithubObject.NotSet
self._trees_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._watchers = github.GithubObject.NotSet
self._watchers_count = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "archive_url" in attributes: # pragma no branch
self._archive_url = self._makeStringAttribute(attributes["archive_url"])
if "assignees_url" in attributes: # pragma no branch
self._assignees_url = self._makeStringAttribute(attributes["assignees_url"])
if "blobs_url" in attributes: # pragma no branch
self._blobs_url = self._makeStringAttribute(attributes["blobs_url"])
if "branches_url" in attributes: # pragma no branch
self._branches_url = self._makeStringAttribute(attributes["branches_url"])
if "clone_url" in attributes: # pragma no branch
self._clone_url = self._makeStringAttribute(attributes["clone_url"])
if "collaborators_url" in attributes: # pragma no branch
self._collaborators_url = self._makeStringAttribute(attributes["collaborators_url"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "compare_url" in attributes: # pragma no branch
self._compare_url = self._makeStringAttribute(attributes["compare_url"])
if "contents_url" in attributes: # pragma no branch
self._contents_url = self._makeStringAttribute(attributes["contents_url"])
if "contributors_url" in attributes: # pragma no branch
self._contributors_url = self._makeStringAttribute(attributes["contributors_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "default_branch" in attributes: # pragma no branch
self._default_branch = self._makeStringAttribute(attributes["default_branch"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "downloads_url" in attributes: # pragma no branch
self._downloads_url = self._makeStringAttribute(attributes["downloads_url"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "fork" in attributes: # pragma no branch
self._fork = self._makeBoolAttribute(attributes["fork"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeIntAttribute(attributes["forks"])
if "forks_count" in attributes: # pragma no branch
self._forks_count = self._makeIntAttribute(attributes["forks_count"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "full_name" in attributes: # pragma no branch
self._full_name = self._makeStringAttribute(attributes["full_name"])
if "git_commits_url" in attributes: # pragma no branch
self._git_commits_url = self._makeStringAttribute(attributes["git_commits_url"])
if "git_refs_url" in attributes: # pragma no branch
self._git_refs_url = self._makeStringAttribute(attributes["git_refs_url"])
if "git_tags_url" in attributes: # pragma no branch
self._git_tags_url = self._makeStringAttribute(attributes["git_tags_url"])
if "git_url" in attributes: # pragma no branch
self._git_url = self._makeStringAttribute(attributes["git_url"])
if "has_downloads" in attributes: # pragma no branch
self._has_downloads = self._makeBoolAttribute(attributes["has_downloads"])
if "has_issues" in attributes: # pragma no branch
self._has_issues = self._makeBoolAttribute(attributes["has_issues"])
if "has_wiki" in attributes: # pragma no branch
self._has_wiki = self._makeBoolAttribute(attributes["has_wiki"])
if "homepage" in attributes: # pragma no branch
self._homepage = self._makeStringAttribute(attributes["homepage"])
if "hooks_url" in attributes: # pragma no branch
self._hooks_url = self._makeStringAttribute(attributes["hooks_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_comment_url" in attributes: # pragma no branch
self._issue_comment_url = self._makeStringAttribute(attributes["issue_comment_url"])
if "issue_events_url" in attributes: # pragma no branch
self._issue_events_url = self._makeStringAttribute(attributes["issue_events_url"])
if "issues_url" in attributes: # pragma no branch
self._issues_url = self._makeStringAttribute(attributes["issues_url"])
if "keys_url" in attributes: # pragma no branch
self._keys_url = self._makeStringAttribute(attributes["keys_url"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "language" in attributes: # pragma no branch
self._language = self._makeStringAttribute(attributes["language"])
if "languages_url" in attributes: # pragma no branch
self._languages_url = self._makeStringAttribute(attributes["languages_url"])
if "master_branch" in attributes: # pragma no branch
self._master_branch = self._makeStringAttribute(attributes["master_branch"])
if "merges_url" in attributes: # pragma no branch
self._merges_url = self._makeStringAttribute(attributes["merges_url"])
if "milestones_url" in attributes: # pragma no branch
self._milestones_url = self._makeStringAttribute(attributes["milestones_url"])
if "mirror_url" in attributes: # pragma no branch
self._mirror_url = self._makeStringAttribute(attributes["mirror_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "network_count" in attributes: # pragma no branch
self._network_count = self._makeIntAttribute(attributes["network_count"])
if "notifications_url" in attributes: # pragma no branch
self._notifications_url = self._makeStringAttribute(attributes["notifications_url"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "open_issues_count" in attributes: # pragma no branch
self._open_issues_count = self._makeIntAttribute(attributes["open_issues_count"])
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(github.Organization.Organization, attributes["organization"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "parent" in attributes: # pragma no branch
self._parent = self._makeClassAttribute(Repository, attributes["parent"])
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeClassAttribute(github.Permissions.Permissions, attributes["permissions"])
if "private" in attributes: # pragma no branch
self._private = self._makeBoolAttribute(attributes["private"])
if "pulls_url" in attributes: # pragma no branch
self._pulls_url = self._makeStringAttribute(attributes["pulls_url"])
if "pushed_at" in attributes: # pragma no branch
self._pushed_at = self._makeDatetimeAttribute(attributes["pushed_at"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "source" in attributes: # pragma no branch
self._source = self._makeClassAttribute(Repository, attributes["source"])
if "ssh_url" in attributes: # pragma no branch
self._ssh_url = self._makeStringAttribute(attributes["ssh_url"])
if "stargazers_count" in attributes: # pragma no branch
self._stargazers_count = self._makeIntAttribute(attributes["stargazers_count"])
if "stargazers_url" in attributes: # pragma no branch
self._stargazers_url = self._makeStringAttribute(attributes["stargazers_url"])
if "statuses_url" in attributes: # pragma no branch
self._statuses_url = self._makeStringAttribute(attributes["statuses_url"])
if "subscribers_url" in attributes: # pragma no branch
self._subscribers_url = self._makeStringAttribute(attributes["subscribers_url"])
if "subscription_url" in attributes: # pragma no branch
self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
if "svn_url" in attributes: # pragma no branch
self._svn_url = self._makeStringAttribute(attributes["svn_url"])
if "tags_url" in attributes: # pragma no branch
self._tags_url = self._makeStringAttribute(attributes["tags_url"])
if "teams_url" in attributes: # pragma no branch
self._teams_url = self._makeStringAttribute(attributes["teams_url"])
if "trees_url" in attributes: # pragma no branch
self._trees_url = self._makeStringAttribute(attributes["trees_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "watchers" in attributes: # pragma no branch
self._watchers = self._makeIntAttribute(attributes["watchers"])
if "watchers_count" in attributes: # pragma no branch
self._watchers_count = self._makeIntAttribute(attributes["watchers_count"])
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.enums",
marshal="google.ads.googleads.v11",
manifest={"ManagerLinkStatusEnum",},
)
class ManagerLinkStatusEnum(proto.Message):
r"""Container for enum describing possible status of a manager
and client link.
"""
class ManagerLinkStatus(proto.Enum):
r"""Possible statuses of a link."""
UNSPECIFIED = 0
UNKNOWN = 1
ACTIVE = 2
INACTIVE = 3
PENDING = 4
REFUSED = 5
CANCELED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
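# Illustrative usage sketch (not part of the generated file): the nested enum is
# addressed through the wrapper message, for example when inspecting the status of
# a customer-manager link; ``link`` below is an assumed message with a status field.
#
#     status = link.status
#     if status == ManagerLinkStatusEnum.ManagerLinkStatus.PENDING:
#         print("link awaiting acceptance")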
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
def gen_do_gradient(op, g_output):
"""
Generates gradient Do operator, given forward Do op and a list
    of gradient blobs corresponding to forward op's outputs.
Returns a gradient op and a list of blobs corresponding to input gradients
"""
from caffe2.python.core import BlobReference
subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name = \
_do_op_sanity_check_and_process(op)
assert len(g_output) == len(op.output), \
"Different number of gradient blobs and Do op outputs"
# From the outer net point of view:
# Do is an operator that has some number of inputs and outputs;
# we have to generate a gradient operator that writes into
# corresponding input gradient blobs and has access to inputs, outputs
# and gradient output blobs
# From the inner net point of view:
# Do is an operator with a subnet and blob bindings,
# we need to forward Do's output blob gradients into inner workspace,
# use them to run backward pass generation and forward Do's input blob
# gradients back into outer workspace
op_output = [str(o) for o in op.output]
op_output = op_output[:-1] # remove workspace pointer blob
op_input = [str(i) for i in op.input]
op_input = op_input[:-1] # remove workspace pointer blob
ordered_inner_output_blob_names = [outer_to_inner_map[o] for o in op_output]
backward_pass_initial_grad_map = {}
initial_grad_map = {}
for inner_output_name, outer_grad_output_name in \
zip(ordered_inner_output_blob_names, g_output):
# link inner_output_name to corresponding inner_grad_output_name for
# backward pass generation;
if outer_grad_output_name:
inner_grad_output_name = inner_output_name + "/_DO_OPERATOR_INNER_GRAD_"
backward_pass_initial_grad_map[BlobReference(inner_output_name)] = \
BlobReference(inner_grad_output_name)
initial_grad_map[inner_grad_output_name] = str(outer_grad_output_name)
assert len(initial_grad_map) > 0, "Empty initial gradient map for Do op"
inner_grad_ops, inner_grad_names_map = _gen_subgradient_pass(
subnet, backward_pass_initial_grad_map)
if len(inner_grad_ops) == 0:
return [], []
grad_copy_ops = []
g_input = []
new_op_outputs = []
new_blob_bindings = {}
for outer_input_name in op_input:
inner_input_name = outer_to_inner_map[outer_input_name]
if inner_input_name in inner_grad_names_map:
inner_grad_input_name = inner_grad_names_map[inner_input_name]
outer_grad_input_name = outer_input_name + "_grad"
# It is possible that inner_grad_input_name will need to be
# linked to another outer blob. For example:
#
# // y - param initialized in init_net
# x = ...
# z = ...
# with ops.IfNet(...):
# ops.Add([z, x], y) # inner Do block
# loss = f(..., y, ...)
#
# In this case x, y and z are external for the inner Do block,
# the inputs of the Do block are z and x and the output is y.
# When computing the gradient of input x given the gradient
# of output y it's easy to see that they are equal.
# During the generation of gradient Do operator, we link
# external gradient y (y_grad) to the internal name
# (y/_DO_OPERATOR_INNER_GRAD_) and generate the backward pass
# for the internal Do net. As a result we get gradient operators
# for the gradient Do and gradient map that maps internal Do
# blobs to their computed gradients.
# In this example, gradient map may have blob x linked to
# gradient blob y/_DO_OPERATOR_INNER_GRAD_.
# We should export gradient for x outside of Do, so
# we add a blob mapping from inner gradient blob
# (y/_DO_OPERATOR_INNER_GRAD_) to a new outer name (x_grad).
#
# (Note: since we use transparent blob mapping between outer and
# inner (Do's) workspace, these operations do not involve copying
# but are merely using blobs in outer workspace in the Do's operator
# workspace under (possibly) different names)
#
# At the same time, we need to add a blob mapping from inner name
# y/_DO_OPERATOR_INNER_GRAD_ to the outer blob y_grad
# Hence in this case, we cannot use existing blob mapping scheme
# that requires a bijection between subset of inner blob names and
# a set of all (Do's input and output) outer blob names
# TODO(iliacher): Remove unnecessary blob copying
new_inner_grad_input_name = \
inner_input_name + "/_DO_OPERATOR_INNER_GRAD_COPY_"
grad_copy_ops.append(_prepare_blob_copy_op(
inner_grad_input_name, new_inner_grad_input_name))
new_blob_bindings[new_inner_grad_input_name] = outer_grad_input_name
new_op_outputs.append(outer_grad_input_name)
g_input.append(outer_grad_input_name)
else:
g_input.append(None)
new_op_inputs = []
overwritten_names = set()
saved_local_blob_names = set()
for grad_op in inner_grad_ops:
grad_op_input = [str(i) for i in grad_op.input]
grad_op_output = [str(o) for o in grad_op.output]
for grad_op_input_name in grad_op_input:
if grad_op_input_name in overwritten_names:
continue
# check if this is an external blob
outer_name = inner_to_outer_map.get(grad_op_input_name, None)
if not outer_name:
# check if this is an external gradient blob
outer_name = initial_grad_map.get(grad_op_input_name, None)
if outer_name:
outer_name = str(outer_name)
if outer_name not in new_op_inputs:
new_op_inputs.append(outer_name)
new_blob_bindings[grad_op_input_name] = outer_name
else:
                # this is a local blob, we'll get its value from
# a saved forward op workspace
saved_local_blob_names.add(grad_op_input_name)
overwritten_names.update(grad_op_output)
# add inner gradient copy ops
inner_grad_ops += grad_copy_ops
gradient_do_def = _prepare_gradient_do_op(
fwd_op=op,
fwd_net=subnet,
grad_ops=inner_grad_ops,
inputs=new_op_inputs,
outputs=new_op_outputs,
blob_bindings=new_blob_bindings,
saved_fwd_blobs=saved_local_blob_names,
workspace_blob_name=workspace_blob_name)
_do_op_sanity_check_and_process(gradient_do_def)
return [gradient_do_def], g_input
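# Note: g_input lines up with the forward Do op's data inputs (the trailing
# workspace pointer blob is excluded); entries are None where no gradient exists.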
def gen_if_gradient(op, g_output):
"""
Generates gradient If operator, given forward If op and a list
of gradient blobs corresponding to forward op's outputs
Returns a gradient op and a list of blobs corresponding to input gradients
"""
from caffe2.python.core import BlobReference
assert op.type == "If", "Expected If op"
# first input is the condition blob
assert len(op.input) > 0, "Expected at least one input in If op"
assert len(op.output) == len(g_output), \
"Different number of gradient blobs and If op outputs"
init_grad_map = {} # map from if's output blob to output gradient blob
op_input = [str(i) for i in op.input]
op_output = [str(o) for o in op.output]
for output_name, grad_output_name in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = \
BlobReference(grad_output_name)
# shouldn't call without at least one output gradient available
assert len(init_grad_map) > 0, "Empty initial gradient map for If op"
grad_map = {} # map from blob to gradient blob
then_net = _get_net_argument(op, "then_net")
assert then_net, "Expected then subnet in If op"
then_grad_net, then_grad_map, then_input_names, then_output_names = \
_gen_if_branch_gradient(then_net, init_grad_map)
assert then_grad_net, "Failed to get gradient net for then in If op"
grad_map.update(then_grad_map)
else_input_names = set()
else_output_names = set()
else_grad_map = {}
else_grad_net = None
else_net = _get_net_argument(op, "else_net")
if else_net:
else_grad_net, else_grad_map, else_input_names, else_output_names = \
_gen_if_branch_gradient(else_net, init_grad_map)
assert else_grad_net, "Failed to get gradient net for else in If op"
grad_map.update(else_grad_map)
# make sure gradients of blobs that were not computed
# by the selected if's branch are initialized with zeros
then_other_output_names = \
then_output_names - (then_output_names & else_output_names)
then_other_grad_output_names = set(
[o for o in then_other_output_names if o in then_grad_map.values()])
zero_then = _gen_grad_zero_init_ops(then_grad_map, then_other_grad_output_names)
if else_grad_net:
else_grad_net.op.extend(zero_then)
elif len(zero_then) > 0:
else_grad_net = caffe2_pb2.NetDef()
else_grad_net.CopyFrom(then_grad_net)
if else_grad_net.name:
else_grad_net.name += "_auto_else_zero_blobs_"
del else_grad_net.op[:]
else_grad_net.op.extend(zero_then)
del else_grad_net.external_input[:]
del else_grad_net.external_output[:]
else_other_output_names = \
else_output_names - (then_output_names & else_output_names)
else_other_grad_output_names = set(
[o for o in else_other_output_names if o in else_grad_map.values()])
zero_else = _gen_grad_zero_init_ops(else_grad_map, else_other_grad_output_names)
then_grad_net.op.extend(zero_else)
output_names = list(then_output_names | else_output_names)
input_names = then_input_names | else_input_names
# make sure condition blob is the first in the list
    input_names = [op_input[0]] + list(input_names - set([op_input[0]]))
gradient_if_def = _prepare_gradient_if_op(
fwd_op=op,
input_names=input_names,
output_names=output_names,
then_grad_net=then_grad_net,
else_grad_net=else_grad_net)
g_input = [grad_map.get(i, None) for i in op_input]
return [gradient_if_def], g_input
def _gen_if_branch_gradient(subnet, init_grad):
grad_ops, grad_names_map = _gen_subgradient_pass(
subnet, init_grad)
output_names = set()
input_names = set()
for grad_op in grad_ops:
for grad_op_input in grad_op.input:
if str(grad_op_input) not in output_names:
input_names.add(str(grad_op_input))
for grad_op_output in grad_op.output:
output_names.add(str(grad_op_output))
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(subnet)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
return gradient_net_def, grad_names_map, input_names, output_names
def _get_net_argument(op, net_name):
for arg in op.arg:
if arg.name and arg.name == net_name:
assert arg.n, "Expected non empty net argument " + net_name
return arg.n
return None
def _gen_subgradient_pass(subnet, init_grad):
from caffe2.python.core import IR
subnet_ir = IR(subnet.op)
grad_ops, grad_blob_map = \
subnet_ir.GetBackwardPass(init_grad)
grad_names_map = {}
for b, g in grad_blob_map.items():
grad_names_map[str(b)] = str(g)
return grad_ops, grad_names_map
def _do_op_sanity_check_and_process(op):
assert op.type == "Do", "Expected Do op"
subnet = _get_net_argument(op, "net")
assert subnet, "No net argument found in Do op"
inner_blobs = None
outer_blobs_idx = None
for arg in op.arg:
if arg.name and arg.name == "inner_blobs":
assert not inner_blobs, "inner_blobs redefinition"
assert arg.strings and len(arg.strings) > 0, \
"Empty inner_blobs argument in Do op"
inner_blobs = [s.decode('utf-8') for s in arg.strings]
if arg.name and arg.name == "outer_blobs_idx":
assert not outer_blobs_idx, "outer_blobs_idx redefinition"
assert arg.ints and len(arg.ints) > 0, \
"Empty outer_blobs_idx argument in Do op"
outer_blobs_idx = arg.ints
if inner_blobs and outer_blobs_idx:
break
assert inner_blobs, "No inner_blobs argument found in Do op"
assert outer_blobs_idx, "No outer_blobs_idx argument found in Do op"
assert len(inner_blobs) == len(outer_blobs_idx), \
"Arguments inner_blobs and outer_blobs_idx of different length in Do op"
all_inner_blobs = set(inner_blobs)
assert len(all_inner_blobs) == len(inner_blobs), \
"Found duplicates in inner_blobs in Do op"
op_input = [str(i) for i in op.input]
assert len(op_input) > 0, "Expected at least one input blob"
# remove last input blob that holds pointer to workspace
input_workspace_blob_name = op_input[-1]
op_input = op_input[:-1]
op_output = [str(o) for o in op.output]
assert len(op_output) > 0, "Expected at least one output blob"
# remove last output blob that holds pointer to workspace
workspace_blob_name = op_output[-1]
assert input_workspace_blob_name == workspace_blob_name, \
"Expected same input/output workspace blob"
op_output = op_output[:-1]
all_op_input_blob_names = set(op_input)
assert len(all_op_input_blob_names) == len(op_input), \
"Found duplicates in Do op inputs"
all_op_output_blob_names = set(op_output)
assert len(all_op_output_blob_names) == len(op_output), \
"Found duplicates in Do op outputs"
ordered_outer_blob_names = op_input + op_output
all_outer_blob_names = set(ordered_outer_blob_names)
used_outer_blob_names = set()
outer_to_inner_map = {}
inner_to_outer_map = {}
for inner_name, outer_blob_idx in zip(inner_blobs, outer_blobs_idx):
assert outer_blob_idx >= 0 and \
outer_blob_idx < len(ordered_outer_blob_names), \
"Outer blob index is out of bounds in Do op"
outer_name = ordered_outer_blob_names[outer_blob_idx]
assert outer_name not in used_outer_blob_names, \
"Reusage of outer blob name " + outer_name + " in Do op"
used_outer_blob_names.add(outer_name)
outer_to_inner_map[outer_name] = inner_name
inner_to_outer_map[inner_name] = outer_name
assert len(used_outer_blob_names) == len(all_outer_blob_names), \
"Not all outer blob names are used in blob bindings in Do op"
return subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name
def _prepare_blob_copy_op(from_name, to_name):
copy_op_def = caffe2_pb2.OperatorDef()
copy_op_def.type = "Copy"
copy_op_def.input.extend([from_name])
copy_op_def.output.extend([to_name])
return copy_op_def
def _prepare_gradient_do_op(
fwd_op, fwd_net, grad_ops, inputs, outputs, blob_bindings, saved_fwd_blobs,
workspace_blob_name):
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(fwd_net)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
gradient_do_def = caffe2_pb2.OperatorDef()
gradient_do_def.CopyFrom(fwd_op)
if gradient_do_def.name and len(gradient_do_def.name) > 0:
gradient_do_def.name += "_grad"
del gradient_do_def.input[:]
gradient_do_def.input.extend(inputs)
# workspace pointer blob
gradient_do_def.input.append(workspace_blob_name)
del gradient_do_def.output[:]
gradient_do_def.output.extend(outputs)
# workspace pointer blob
gradient_do_def.output.append(workspace_blob_name)
net_arg = caffe2_pb2.Argument()
net_arg.name = "net"
net_arg.n.CopyFrom(gradient_net_def)
ordered_new_outer_names = inputs + outputs
inner_blobs = blob_bindings.keys()
new_outer_blobs_idx = [ordered_new_outer_names.index(blob_bindings[b])
for b in inner_blobs]
inner_blobs_arg = caffe2_pb2.Argument()
inner_blobs_arg.name = "inner_blobs"
inner_blobs_arg.strings.extend([b.encode('utf-8') for b in inner_blobs])
outer_blobs_idx_arg = caffe2_pb2.Argument()
outer_blobs_idx_arg.name = "outer_blobs_idx"
outer_blobs_idx_arg.ints.extend(new_outer_blobs_idx)
saved_blobs_arg = caffe2_pb2.Argument()
saved_blobs_arg.name = "saved_fwd_blobs"
saved_blobs_arg.strings.extend(
[b.encode('utf-8') for b in saved_fwd_blobs])
del gradient_do_def.arg[:]
gradient_do_def.arg.extend([
net_arg, inner_blobs_arg, outer_blobs_idx_arg, saved_blobs_arg])
del gradient_do_def.control_input[:]
gradient_do_def.is_gradient_op = True
return gradient_do_def
def _gen_grad_zero_init_ops(grad_map, grad_output_names):
grad_zero_init_ops = []
for grad_output in grad_output_names:
# get the corresponding output name blob and use it in ConstantFill
# so that grad_output has the same shape
output_name = None
for o, g in grad_map.items():
if g == grad_output:
output_name = o
break
assert output_name, "Unknown gradient output " + grad_output
grad_zero_init_op = caffe2_pb2.OperatorDef()
grad_zero_init_op.type = "ConstantFill"
grad_zero_init_op.input.extend([output_name])
grad_zero_init_op.output.extend([grad_output])
value_arg = caffe2_pb2.Argument()
value_arg.name = "value"
value_arg.f = 0.0
grad_zero_init_op.arg.extend([value_arg])
grad_zero_init_ops.append(grad_zero_init_op)
return grad_zero_init_ops
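# Each generated op is effectively
#   ConstantFill([<forward output blob>], [<missing gradient blob>], value=0.0)
# i.e. the missing gradient is zero-filled with the forward blob's shape.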
def _prepare_gradient_if_op(
fwd_op, input_names, output_names, then_grad_net, else_grad_net):
gradient_if_def = caffe2_pb2.OperatorDef()
gradient_if_def.CopyFrom(fwd_op)
del gradient_if_def.input[:]
gradient_if_def.input.extend(input_names)
del gradient_if_def.output[:]
gradient_if_def.output.extend(output_names)
then_net_arg = caffe2_pb2.Argument()
then_net_arg.name = "then_net"
then_net_arg.n.CopyFrom(then_grad_net)
gradient_args = [then_net_arg]
if else_grad_net:
else_net_arg = caffe2_pb2.Argument()
else_net_arg.name = "else_net"
else_net_arg.n.CopyFrom(else_grad_net)
gradient_args.append(else_net_arg)
del gradient_if_def.arg[:]
gradient_if_def.arg.extend(gradient_args)
if gradient_if_def.name:
gradient_if_def.name += "_grad"
del gradient_if_def.control_input[:]
gradient_if_def.is_gradient_op = True
return gradient_if_def
| {
"content_hash": "3b7ffca34fa01c420ca9677908caa29f",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 84,
"avg_line_length": 40.73695198329854,
"alnum_prop": 0.6330651360631374,
"repo_name": "pietern/caffe2",
"id": "3efbf86869c961c8dd2dbf16e8bf121775024e71",
"size": "20253",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "caffe2/python/control_ops_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "316608"
},
{
"name": "C++",
"bytes": "4743501"
},
{
"name": "CMake",
"bytes": "139649"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "671183"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2902249"
},
{
"name": "Shell",
"bytes": "31734"
}
],
"symlink_target": ""
} |
import time
import unittest
import config
import mle
import node
LEADER = 1
ROUTER = 2
SNIFFER = 3
class Cert_5_1_13_RouterReset(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1, 3):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
self.sniffer = config.create_default_thread_sniffer(SNIFFER)
self.sniffer.start()
def tearDown(self):
self.sniffer.stop()
del self.sniffer
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
time.sleep(4)
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
rloc16 = self.nodes[ROUTER].get_addr16()
self.nodes[ROUTER].stop()
time.sleep(5)
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER].get_addr16(), rloc16)
leader_messages = self.sniffer.get_messages_sent_by(LEADER)
router1_messages = self.sniffer.get_messages_sent_by(ROUTER)
# 1 - All
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
msg = leader_messages.next_mle_message_of_one_of_command_types(mle.CommandType.LINK_ACCEPT_AND_REQUEST,
mle.CommandType.LINK_ACCEPT)
self.assertIsNotNone(msg)
# 2 - Router1 / Leader
msg = router1_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::1")
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Route64)
msg = leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::1")
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Route64)
# 4 - Router1
msg = router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.TlvRequest)
tlv_request = msg.get_mle_message_tlv(mle.TlvRequest)
self.assertIn(mle.TlvType.ROUTE64, tlv_request.tlvs)
self.assertIn(mle.TlvType.ADDRESS16, tlv_request.tlvs)
# 5 - Leader
msg = leader_messages.next_mle_message(mle.CommandType.LINK_ACCEPT)
msg.assertSentToNode(self.nodes[ROUTER])
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Response)
msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
msg.assertMleMessageContainsOptionalTlv(mle.MleFrameCounter)
msg.assertMleMessageContainsTlv(mle.Address16)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.Route64)
msg.assertMleMessageContainsOptionalTlv(mle.Challenge)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1efb75faf85178e762c0add398cdbf02",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 111,
"avg_line_length": 36.784,
"alnum_prop": 0.6718138321009134,
"repo_name": "GiedriusM/openthread",
"id": "b9e3743a4f5707257bfb5761c4772ca73dd66cdb",
"size": "6198",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_5_1_13_RouterReset.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9623"
},
{
"name": "C",
"bytes": "352912"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "2747855"
},
{
"name": "M4",
"bytes": "34798"
},
{
"name": "Makefile",
"bytes": "76131"
},
{
"name": "Python",
"bytes": "1010387"
},
{
"name": "Shell",
"bytes": "14997"
}
],
"symlink_target": ""
} |
from nltk.stem import SnowballStemmer
import nltk
#Creates tokens
def makeTokensList(text):
tokens=nltk.word_tokenize(text)
return tokens
#Gets tokens stems
def appendTokensStem(tokensList):
#To get stem associated to word
stemmedTokens=[]
stemmer=SnowballStemmer("spanish")
#To count tokens and types
numTokens=0
numStems=0
#To get stems frequency
stemsList=[]
stemsFrequency={}
for token in tokensList:
numTokens+=1
st={}
st["word"]=token
stem=stemmer.stem(token)
st["stem"]=stem
stemmedTokens.append(st)
if (stem in stemsList):
stemsFrequency[stem]+=1
else:
stemsList.append(stem)
stemsFrequency[stem]=1
numStems+=1
return stemmedTokens, numTokens, numStems, stemsFrequency.items()
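# Illustrative usage (hypothetical text; assumes the NLTK 'punkt' tokenizer
# data is available, e.g. via nltk.download('punkt')):
#   tokens = makeTokensList("los gatos comen pescado")
#   stemmed, num_tokens, num_stems, frequencies = appendTokensStem(tokens)
#   # 'stemmed' is a list of {"word": ..., "stem": ...} dicts; num_tokens counts
#   # every token while num_stems counts distinct stems.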
| {
"content_hash": "fa9355583aca281771ca9713ad560322",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 23.10810810810811,
"alnum_prop": 0.6362573099415205,
"repo_name": "CesarCardam/CesarCardam.github.io",
"id": "e6ec368f0dd0df1e17bb2e6b9973e32ff344f92e",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/WordRelation/Python/Utils/tokenizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "523440"
},
{
"name": "HTML",
"bytes": "17122"
},
{
"name": "JavaScript",
"bytes": "582952"
},
{
"name": "Python",
"bytes": "14368"
}
],
"symlink_target": ""
} |
"""
tests for default credential manager using gitconfig
"""
import os
import unittest
import tempfile
from cirrus.plugins.creds.default import Default
from cirrus._2to3 import ConfigParser
class DefaultCredsTests(unittest.TestCase):
"""
test default plugin gitconfig access
"""
def setUp(self):
"""set up a test gitconfig"""
self.dir = tempfile.mkdtemp()
self.gitconfig = os.path.join(self.dir, '.gitconfig')
gitconf = ConfigParser.RawConfigParser()
gitconf.add_section('cirrus')
gitconf.set('cirrus', 'credential-plugin', 'default')
gitconf.set('cirrus', 'github-user', 'steve')
gitconf.set('cirrus', 'github-token', 'steves token')
with open(self.gitconfig, 'w') as handle:
gitconf.write(handle)
def tearDown(self):
"""cleanup"""
if os.path.exists(self.dir):
os.system('rm -rf {0}'.format(self.dir))
def test_reading_gitconfig(self):
"""test reading in the fake gitconfig and accessing data"""
plugin = Default(gitconfig_file=self.gitconfig)
gh = plugin.github_credentials()
        self.assertIn('github_user', gh)
        self.assertIn('github_token', gh)
self.assertEqual(gh['github_user'], 'steve')
self.assertEqual(gh['github_token'], 'steves token')
# test all the methods work with defaults
for n, m in plugin.credential_methods():
m()
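# For reference, the gitconfig written by setUp() above looks roughly like:
#   [cirrus]
#   credential-plugin = default
#   github-user = steve
#   github-token = steves token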
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "cacff8bcf3a6c2bea12f68ee5d0115ed",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 67,
"avg_line_length": 30.897959183673468,
"alnum_prop": 0.6274768824306473,
"repo_name": "evansde77/cirrus",
"id": "33de35248b96be4e0051f16ac9456f11e355c64a",
"size": "1536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/cirrus/default_creds_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5952"
},
{
"name": "HTML",
"bytes": "2855"
},
{
"name": "Python",
"bytes": "474730"
},
{
"name": "Shell",
"bytes": "27086"
}
],
"symlink_target": ""
} |
import os
def numCPUs():
if not hasattr(os, 'sysconf'):
raise RuntimeError('No sysconf detected.')
return os.sysconf('SC_NPROCESSORS_ONLN')
bind = '0.0.0.0:8001'
workers = 4
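# numCPUs() above can be used for sizing; a commonly cited gunicorn rule of
# thumb is workers = numCPUs() * 2 + 1, though this config pins 4 workers.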
# backlog = 2048
# worker_class = 'sync'
worker_class = 'gevent'
debug = True
daemon = True
pidfile = '/tmp/gunicorn.pid'
logfile = '/tmp/gunicorn.log'
# gunicorn -c config-gunicorn.py views:app
| {
"content_hash": "7000173c0e557e9f11a7ab7a4d4a12e3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 50,
"avg_line_length": 20.473684210526315,
"alnum_prop": 0.6735218508997429,
"repo_name": "8thmatyr/evnt-web",
"id": "cc664ea3d4b7d2628c64a0a4e85cc5a5a407db99",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/conf/config-gunicorn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('movielists', '0022_auto_20150422_1227'),
]
operations = [
migrations.AddField(
model_name='movie',
name='total_rating',
field=models.IntegerField(default=0),
preserve_default=True,
),
]
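# This migration is typically applied with the standard Django command,
# e.g. `python manage.py migrate movielists` (command shown for illustration).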
| {
"content_hash": "08acec4e1f7ba4c2c886f4a0d63e7b27",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 50,
"avg_line_length": 22,
"alnum_prop": 0.5909090909090909,
"repo_name": "kiriakosv/movie-recommendator",
"id": "bbab7dabf6105623948d7f3be957121923ff7e00",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviesite/movielists/migrations/0023_movie_total_rating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2101"
},
{
"name": "HTML",
"bytes": "9505"
},
{
"name": "Python",
"bytes": "36726"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'AnswerGroup.answer'
db.delete_column(u'questionnaire_answergroup', 'answer_id')
# Adding M2M table for field answer on 'AnswerGroup'
db.create_table(u'questionnaire_answergroup_answer', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('answergroup', models.ForeignKey(orm['questionnaire.answergroup'], null=False)),
('answer', models.ForeignKey(orm['questionnaire.answer'], null=False))
))
db.create_unique(u'questionnaire_answergroup_answer', ['answergroup_id', 'answer_id'])
def backwards(self, orm):
# Adding field 'AnswerGroup.answer'
db.add_column(u'questionnaire_answergroup', 'answer',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.Answer'], null=True),
keep_default=False)
# Removing M2M table for field answer on 'AnswerGroup'
db.delete_table('questionnaire_answergroup_answer')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['questionnaire.Answer']", 'null': 'True', 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionGroup']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DateField', [], {})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short_instruction': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.questiongroup': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_group'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
'question': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['questionnaire.Question']", 'symmetrical': 'False'}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.questiongrouporder': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroupOrder'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Question']"}),
'question_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'questionnaire.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['questionnaire.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'ordering': "('order',)", 'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.subsection': {
'Meta': {'ordering': "('order',)", 'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.textanswer': {
'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
}
}
complete_apps = ['questionnaire']
| {
"content_hash": "9fe0f642dda39a0c1b6987f7f51690a5",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 188,
"avg_line_length": 79.49532710280374,
"alnum_prop": 0.5694215847636962,
"repo_name": "testvidya11/ejrf",
"id": "27801d86da2e3b435b4dad9efb3cfac797e98d31",
"size": "17036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questionnaire/migrations/0021_auto__del_field_answergroup_answer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6961"
},
{
"name": "JavaScript",
"bytes": "55198"
},
{
"name": "Python",
"bytes": "1196960"
},
{
"name": "Shell",
"bytes": "1160"
}
],
"symlink_target": ""
} |
"""
Webhook endpoint for Senlin v1 ReST API.
"""
from webob import exc
from senlin.api.common import util
from senlin.api.common import wsgi
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import utils
class ReceiverData(object):
"""The data accompanying a POST request to create a receiver."""
PARAMS = (consts.RECEIVER_NAME, consts.RECEIVER_TYPE,
consts.RECEIVER_CLUSTER, consts.RECEIVER_ACTION,
consts.RECEIVER_ACTOR, consts.RECEIVER_PARAMS)
def __init__(self, data):
self.data = data
def name(self):
if consts.RECEIVER_NAME not in self.data:
raise exc.HTTPBadRequest(_("Missing 'name' in request."))
return self.data.get(consts.RECEIVER_NAME, None)
def cluster_id(self):
if consts.RECEIVER_CLUSTER_ID not in self.data:
raise exc.HTTPBadRequest(_("Missing 'cluster_id' in request."))
return self.data[consts.RECEIVER_CLUSTER_ID]
def type_name(self):
if consts.RECEIVER_TYPE not in self.data:
raise exc.HTTPBadRequest(_("Missing 'type' in request."))
return self.data[consts.RECEIVER_TYPE]
def action(self):
if consts.RECEIVER_ACTION not in self.data:
raise exc.HTTPBadRequest(_("Missing 'action' in request."))
return self.data[consts.RECEIVER_ACTION]
def actor(self):
return self.data.get(consts.RECEIVER_ACTOR, None)
def params(self):
return self.data.get(consts.RECEIVER_PARAMS, None)
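# Illustrative request body consumed by ReceiverData (field names follow the
# consts referenced above; the values themselves are made up):
#   {"receiver": {"name": "scale_out_hook", "type": "webhook",
#                 "cluster_id": "<cluster uuid>", "action": "CLUSTER_SCALE_OUT",
#                 "params": {"count": "1"}}}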
class ReceiverController(wsgi.Controller):
"""WSGI controller for receiver resource in Senlin v1 API."""
REQUEST_SCOPE = 'receivers'
@util.policy_enforce
def index(self, req):
filter_whitelist = {
consts.RECEIVER_NAME: 'mixed',
consts.RECEIVER_TYPE: 'mixed',
consts.RECEIVER_CLUSTER_ID: 'mixed',
consts.RECEIVER_ACTION: 'mixed',
}
param_whitelist = {
consts.PARAM_LIMIT: 'single',
consts.PARAM_MARKER: 'single',
consts.PARAM_SORT: 'single',
consts.PARAM_GLOBAL_PROJECT: 'single',
}
for key in req.params.keys():
if (key not in param_whitelist.keys() and key not in
filter_whitelist.keys()):
raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)
params = util.get_allowed_params(req.params, param_whitelist)
filters = util.get_allowed_params(req.params, filter_whitelist)
key = consts.PARAM_LIMIT
if key in params:
params[key] = utils.parse_int_param(key, params[key])
key = consts.PARAM_GLOBAL_PROJECT
if key in params:
show_global = utils.parse_bool_param(key, params[key])
del params[key]
params['project_safe'] = not show_global
if not filters:
filters = None
receivers = self.rpc_client.receiver_list(req.context, filters=filters,
**params)
return {'receivers': receivers}
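    # Illustrative query, assuming the consts above map to these literal names
    # (values are made up): GET /v1/receivers?type=webhook&limit=10&global_project=True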
@util.policy_enforce
def create(self, req, body):
data = body.get('receiver')
if data is None:
raise exc.HTTPBadRequest(_("Malformed request data, missing "
"'receiver' key in request body."))
data = ReceiverData(data)
result = self.rpc_client.receiver_create(req.context,
data.name(),
data.type_name(),
data.cluster_id(),
data.action(),
data.actor(),
data.params())
return {'receiver': result}
@util.policy_enforce
def get(self, req, receiver_id):
receiver = self.rpc_client.receiver_get(req.context, receiver_id)
return {'receiver': receiver}
@util.policy_enforce
def delete(self, req, receiver_id):
self.rpc_client.receiver_delete(req.context, receiver_id, cast=False)
| {
"content_hash": "d86f0f0b51286946b1d4081038a44bcf",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 35.18333333333333,
"alnum_prop": 0.571293225959261,
"repo_name": "tengqm/senlin-container",
"id": "581f567d663fc912e1e98bfc031212e0a6bbef8b",
"size": "4771",
"binary": false,
"copies": "1",
"ref": "refs/heads/container_cluster_support",
"path": "senlin/api/openstack/v1/receivers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2239281"
},
{
"name": "Shell",
"bytes": "18730"
}
],
"symlink_target": ""
} |
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
def _network_name_scope_naming(current_variable_scope):
"""Name scope naming to match operation names to variable names.
Used in Networks and also applied to non-Network Layers which are added to
Networks before being built.
Args:
current_variable_scope: A VariableScope object.
Returns:
A name scope name.
"""
return current_variable_scope.name + "/"
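# For example, a Network built under a variable scope named "my_network_1"
# gets the name scope "my_network_1/", so op names line up with its variables.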
_NETWORK_DEPRECATION_MESSAGE = (
"Please inherit from `tf.keras.Model`, and see its documentation for "
"details. `tf.keras.Model` should be a drop-in replacement for "
"`tfe.Network` in most cases, but note that `track_layer` is no longer "
"necessary or supported. Instead, `Layer` instances are tracked on "
"attribute assignment (see the section of `tf.keras.Model`'s documentation "
"on subclassing). Since the output of `track_layer` is often assigned to "
"an attribute anyway, most code can be ported by simply removing the "
"`track_layer` calls.\n\n`tf.keras.Model` works with all TensorFlow "
"`Layer` instances, including those from `tf.layers`, but switching to "
"the `tf.keras.layers` versions along with the migration to "
"`tf.keras.Model` is recommended, since it will preserve variable names. "
"Feel free to import it with an alias to avoid excess typing :).")
class Network(base.Layer):
"""Represents the composition of a set of Layers.
*Deprecated*. Please inherit from `tf.keras.Model`, and see its documentation
for details. `tf.keras.Model` should be a drop-in replacement for
`tfe.Network` in most cases, but note that `track_layer` is no longer
necessary or supported. Instead, `Layer` instances are tracked on attribute
assignment (see the section of `tf.keras.Model`'s documentation on
subclassing). Since the output of `track_layer` is often assigned to an
attribute anyway, most code can be ported by simply removing the `track_layer`
calls.
`tf.keras.Model` works with all TensorFlow `Layer` instances, including those
from `tf.layers`, but switching to the `tf.keras.layers` versions along with
the migration to `tf.keras.Model` is recommended, since it will preserve
variable names. Feel free to import it with an alias to avoid excess typing
:).
`Network` implements the `Layer` interface and adds convenience methods for
managing sub-`Layer`s, such as listing variables.
`Layer`s (including other `Network`s) should be added via `track_layer`. They
can then be used when overriding the `Network.call` method:
```python
class TwoLayerNetwork(tfe.Network):
def __init__(self, name):
super(TwoLayerNetwork, self).__init__(name=name)
self.layer_one = self.track_layer(tf.compat.v1.layers.Dense(16,
input_shape=(8,)))
self.layer_two = self.track_layer(tf.compat.v1.layers.Dense(1,
input_shape=(16,)))
def call(self, inputs):
return self.layer_two(self.layer_one(inputs))
```
After constructing an object and calling the `Network`, a list of variables
created by tracked `Layer`s is available via `Network.variables`:
```python
net = TwoLayerNetwork(name="net")
output = net(tf.ones([1, 8]))
print([v.name for v in net.variables])
```
This example prints variable names, one kernel and one bias per
`tf.compat.v1.layers.Dense` layer:
```
['net/dense/kernel:0',
'net/dense/bias:0',
'net/dense_1/kernel:0',
'net/dense_1/bias:0']
```
These variables can be passed to a `Saver` (`tf.compat.v1.train.Saver`, or
`tf.contrib.eager.Saver` when executing eagerly) to save or restore the
`Network`, typically alongside a global step and
`tf.compat.v1.train.Optimizer`
variables when checkpointing during training.
Note that the semantics of calling a `Network` with graph execution (i.e. not
executing eagerly) may change slightly in the future. Currently stateful ops
are pruned from the graph unless they or something that depends on them is
executed in a session, but this behavior is not consistent with eager
execution (where stateful ops are executed eagerly). `Layer`s from `tf.layers`
do not depend on this pruning and so will not be affected, but `Network`s
which rely on stateful ops being added to the graph but not executed (e.g. via
custom `Layer`s which manage stateful ops) may break with this change.
"""
# TODO(josh11b,ashankar,allenl):
# - Should 'trainable' be changeable on the Network object?
# - Do we allow add_variable in Network?
# - Detect layers used in __call__ that weren't registered with track_layer.
# - Convert inputs to __call__ to tensors.
@deprecation.deprecated(date=None, instructions=_NETWORK_DEPRECATION_MESSAGE)
def __init__(self, name=None):
"""Configure the `Network`.
Args:
name: The name to use for this `Network`. If specified, it must be unique
in the context where this `Network` is first (1) added to another
`Network` (in which case it must not share a name with other `Layers`
added to that `Network`), or (2) built/called (in which case no other
'top-level' `Network`s may share this name). If unspecified or None, the
`Network` will be named using its class name, with a number appended if
necessary for uniqueness (e.g. MyNetwork -> 'my_network_1').
Raises:
ValueError: If `name` is not valid. Note that some naming errors will
instead be raised when the `Network` is called.
"""
if context.executing_eagerly():
logging.warning(
("** tfe.Network is deprecated and will be removed in a future "
"version.\n\n%s"), _NETWORK_DEPRECATION_MESSAGE)
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
# Hold on to the variable scope counts from init to check whether a scope
# with the name we want was ever created in our parent scope. Without this
# check we might have name collisions if the parent scope on init gets
# closed before build is called.
self._variable_scope_counts_on_init = (
variable_scope.get_variable_scope_store().variable_scopes_count)
def _gather_saveables_for_checkpoint(self):
raise NotImplementedError(
"tfe.Network does not support object-based checkpointing.\n\n%s" %
_NETWORK_DEPRECATION_MESSAGE)
def _name_scope_name(self, current_variable_scope):
"""Overrides Layer op naming to match variable naming."""
return _network_name_scope_naming(
current_variable_scope=current_variable_scope)
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
      # We were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
name_uid_map = parent_network._sub_layer_name_uids
else:
name_uid_map = backend.get_default_graph_uid_map()
# Figure out which names we have to avoid based on which variable scope
# we're nested in.
strip_name = self._default_parent_variable_scope.name
if strip_name:
strip_name += "/"
def _strip_on_init_scope(name):
if name.startswith(strip_name):
return name[len(strip_name):]
else:
return None
avoid_names = set(
_strip_on_init_scope(name)
for name in self._variable_scope_counts_on_init.keys()
if name)
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=self._default_parent_variable_scope.name,
zero_based=True)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
        # If we were never added to another Network, or that Network has been
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope) as parent_vs:
expected_scope_name = parent_vs.name + "/" + self._name
if expected_scope_name in self._variable_scope_counts_on_init:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") %
(self._name,))
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") %
(self._name,))
if (first_parent and scope_prefix[:-1] != first_parent.scope_name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") %
(first_parent.scope_name, scope_prefix[:-1], self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This Network's name takes on
# the full variable scope prefix.
self._name = scope_name
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please file a feature request.") % (self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True, default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
# Also switch op naming for this Layer to match Network conventions,
# i.e. op naming matching variable naming.
sublayer._name_scope_name = _network_name_scope_naming
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.compat.v1.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
# Always use `ResourceVariable` with legacy layers.
layer._use_resource_variables = True
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers,
zero_based=True
# No namespace required, since we've specified our own UID map.
)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built and layer._first_parent and
self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
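  # Example: a minimal sketch of how track_layer is typically used from a
  # Network subclass (the class name and layer sizes are illustrative):
  #
  #   class TwoLayerNetwork(tfe.Network):
  #     def __init__(self, name=None):
  #       super(TwoLayerNetwork, self).__init__(name=name)
  #       self.layer_one = self.track_layer(tf.layers.Dense(16))
  #       self.layer_two = self.track_layer(tf.layers.Dense(1))
  #     def call(self, inputs):
  #       return self.layer_two(self.layer_one(inputs))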
def get_layer(self, name=None, index=None):
"""Get a contained `tf.compat.v1.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index by
the order they are added.
Returns:
A `tf.compat.v1.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
@property
def layers(self):
return self._layers
def add_variable(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None):
raise RuntimeError(
"add_variable not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
def add_loss(self, losses, inputs=None):
raise RuntimeError(
"add_loss is not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
@property
def losses(self):
"""Gather losses from `Layer`s in the `Network`.
Note that when executing eagerly, `Layer.losses` evaluates
regularizers. When using graph execution, variable regularization ops have
already been created and are simply returned here.
Returns:
A list of tensors.
"""
layer_losses = []
for layer in self.layers:
layer_losses.extend(layer.losses)
return layer_losses
# TODO(allenl): Support other Layer methods needed for graph mode, such as for
# updates
class Sequential(Network):
"""Represents a linear sequence of Layers or functions.
The output of each layer/function is provided as the input to the next.
The inputs passed to `__call__` are passed to the inputs of the first
Layer, and it returns the outputs of the last Layer.
Args:
layers_funcs: An optional sequence where each element is either a
tf.compat.v1.layers.Layer object or a callable.
name: An optional string name to use for this Network.
"""
def __init__(self, layers_funcs=None, name=None):
super(Sequential, self).__init__(name=name)
self._layers_funcs = []
if layers_funcs:
for l in layers_funcs:
self.add(l)
def add(self, layer_func):
if isinstance(layer_func, base.Layer):
args = function_utils.fn_args(layer_func.call)
self.track_layer(layer_func)
elif callable(layer_func):
args = function_utils.fn_args(layer_func)
else:
raise TypeError(
"Sequential.add() takes only tf.layers.Layer objects or callables; "
"not '%s' of type '%s'." % (layer_func, type(layer_func)))
self._layers_funcs.append((("training" in args), layer_func))
def call(self, inputs, training=None):
"""Call each Layer in the order they were added."""
# TODO(josh11b): Support "mode" and maybe other arguments
if training is None:
for _, l in self._layers_funcs:
inputs = l(inputs)
else:
for has_training_arg, l in self._layers_funcs:
if has_training_arg:
inputs = l(inputs, training)
else:
inputs = l(inputs)
return inputs
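# Example: a minimal sketch of composing a Sequential from Layers and plain
# callables (the layer sizes and activation are illustrative):
#
#   model = Sequential([tf.layers.Dense(10, activation=tf.nn.relu),
#                       tf.layers.Dense(1),
#                       tf.nn.sigmoid])
#   outputs = model(tf.zeros([2, 5]))  # runs each element in order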
_DeferredRestoration = collections.namedtuple(
"_DeferredRestoration",
[
# The map_func to use (either user-specified or the default).
"map_func",
# Boolean, True if the user specified an explicit map_func, for error
# messages.
"map_func_is_user",
# A mapping from checkpoint names to initial values of not-yet-created
# variables which should be restored. These values come from parsing a
# checkpoint.
"checkpointed_variables_to_restore",
# A mapping from checkpoint name to variable objects of variables which
# have already been restored, for error checking.
"restored_variables",
# The session to restore with (if in graph mode).
"session",
# Names of the Network where the restore was requested, for error
# messages.
"network_name",
"network_scope_name"
])
def _default_naming_conflict_error_message(mapped_name, first_variable,
second_variable, network_name,
network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to save and restore to use fully qualified "
"variable names in the checkpoint, although this will require that the "
"variable prefix of the Network being restored into is also '%s'. You "
"may alternatively write an arbitrary mapping.") %
(network_name, network_scope_name, mapped_name,
first_variable._shared_name, second_variable._shared_name,
network_scope_name))
def _restore_custom_map_func_error_message(mapped_name, first_variable,
second_variable, network_name,
network_scope_name):
return (
("The map_func passed to restore_network_checkpoint for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error when saving, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") %
(network_name, mapped_name, first_variable._shared_name,
second_variable._shared_name, network_scope_name))
def _make_custom_getter_for_deferred_restorations():
"""Returns a custom getter which searches `deferred_restorations`.
Returns: A tuple of (_custom_getter, deferred_restorations)
_custom_getter: The getter which should be added to variable_scopes where
variables will be created.
    deferred_restorations: A list of _DeferredRestoration objects. Typically
empty when the getter is set, and expanded as deferred restorations are
requested. All new deferred restorations should be appended to the end of
the list, where they will have priority over older deferred restorations.
"""
deferred_restorations = []
def _custom_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
*args,
**kwargs):
"""A custom getter which processes deferred restorations."""
# Iterate over restorations, newest first (newer restorations will take
# precedence over older restorations, just like with immediate restorations
# into existing variables).
delayed_restoration = None
found_value = False
value_to_restore = None
for delayed_restoration in reversed(deferred_restorations):
checkpoint_name = delayed_restoration.map_func(name)
if (checkpoint_name in
delayed_restoration.checkpointed_variables_to_restore):
found_value = True
value_to_restore = (
delayed_restoration
.checkpointed_variables_to_restore[checkpoint_name])
if found_value:
break
# value_to_restore may be False because this variable is not in any
# checkpoint we are restoring, or None because we have explicitly set it to
# None when it was previously fetched. In either case, we don't need to
# set an initializer.
if found_value and value_to_restore is not None:
initializer = value_to_restore
shape = None
variable = getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
*args,
**kwargs)
if found_value and value_to_restore is not None:
# Mark as already restored from this checkpoint.
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name] = None
if not context.executing_eagerly():
delayed_restoration.session.run(variable.initializer)
if found_value:
# Error checking should run even if we've already restored a value.
if delayed_restoration.restored_variables.setdefault(
checkpoint_name, variable) is not variable:
# Naming conflict. We've tried to initialize two variables with the
# same value from the checkpoint.
if delayed_restoration.map_func_is_user:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration
.restored_variables[checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
else:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration
.restored_variables[checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
return variable
return _custom_getter, deferred_restorations
def _make_prefix_stripping_map_fn(scope_name):
"""Closure for stripping the scope name of a Network.
Implemented as a closure rather than a member function to avoid reference
cycles in deferred restorations (this function should not have a reference to
the Network which created it).
Args:
scope_name: The Network.scope_name to strip from variables.
Returns:
A scope_name-stripping default `map_fn` for the Network.
"""
def _strip_variable_prefix(original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0 suffix) to
map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
return _strip_variable_prefix
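# Example: with scope_name "my_network_1" the returned map_fn behaves roughly
# as follows (the variable names are illustrative):
#
#   strip = _make_prefix_stripping_map_fn("my_network_1")
#   strip("my_network_1/dense/kernel")   # -> "dense/kernel"
#   strip("other_network/dense/kernel")  # -> unchanged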
@deprecation.deprecated(
date=None,
instructions=(
"Please inherit from tf.keras.Model instead of tfe.Network, and use "
"tf.keras.Model.save_weights."))
def save_network_checkpoint(network, save_path, global_step=None,
map_func=None):
"""Save variables from the Network to a checkpoint.
Args:
network: A Network object to save.
save_path: Either a checkpoint prefix or the name of a directory to save the
checkpoint in (in which case the checkpoint will be named based on the
Network name).
global_step: The global step to use when naming the checkpoint. If None
(default), we will first try to get the default global step. If that fails
because no default global step exists, then the checkpoint is created
without a global step suffix.
map_func: A function mapping fully qualified variable names (e.g.
'my_network_1/dense_1/kernel') to names in the checkpoint. By default (if
`map_func=None`), the variable prefix for the network being restored
(`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped and all
other variable names (shared with other Networks) are left unchanged.
Returns:
The checkpoint prefix for the saved checkpoint, which may be passed to
`Network.restore`.
Raises:
ValueError: If the Network has not yet been called, or if map_func results
in a name collision.
"""
if not network.built:
raise ValueError(
"Attempt to save the Network before it was first called. This means "
"variables have not yet been created, so there is nothing to save.")
network._set_scope() # scope_name should be available to map_funcs
if global_step is None:
global_step = training_util.get_global_step()
if os.path.isdir(save_path):
# If we were passed a directory, default to naming based on the Network
# name.
save_path = os.path.join(save_path, network.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(network.scope_name)
variable_map = {}
for variable in network.variables:
mapped_name = map_func(variable._shared_name)
if variable_map.setdefault(mapped_name, variable) is not variable:
if user_map_func is None:
# Instead of erroring out, we could just re-try and silently use the
# full variable names in the checkpoint. This could be odd for deeply
# nested sub-Networks (since the full prefix from the nesting would
# get added), so for now we'll let the user deal with this case.
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=mapped_name,
first_variable=variable_map[mapped_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
else:
# The user passed their own problematic map_func.
raise ValueError(
("The map_func passed to save_network_checkpoint for the Network "
"'%s' resulted in two variables named '%s' ('%s' and '%s'). Try "
"stripping less from the variable names, or renaming parts of "
"the Network. For reference, variables created by sub-Layers of "
"this Network are prefixed with '%s', but if they are re-used "
"after being added to another Network, they will have that "
"Network's full variable prefix instead.") %
(network.name, mapped_name, variable_map[mapped_name]._shared_name,
variable._shared_name, network.scope_name))
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
return saver_lib.Saver(variable_map).save(
sess=sess,
save_path=save_path,
write_meta_graph=False,
global_step=global_step)
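# Example usage (a minimal sketch; assumes `net` is a Network that has already
# been called and "/tmp/net_ckpt" is a writable directory):
#
#   prefix = save_network_checkpoint(net, "/tmp/net_ckpt")
#   # `prefix` can later be passed to restore_network_checkpoint.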
def _add_deferred_restoration(layer, deferred_restoration):
"""Add a deferred restoration to this Layer and all children.
Restorations which are requested later have higher priority, and the highest
priority matching restoration is applied to a variable when it is created.
Args:
layer: The Layer (may not be a Network) to operate on.
deferred_restoration: A _DeferredRestoration object.
"""
# Networks don't create variables at the moment, so this append isn't strictly
# necessary. We could get by with only adding deferred restorations to
# non-Network Layers.
if isinstance(layer, Network):
layer._set_scope()
# Make sure this Layer has a deferred restoration queue and a custom getter,
# then add our request to it.
if not hasattr(layer, "_custom_getter"):
assert not hasattr(layer, "_deferred_restorations")
layer._custom_getter, layer._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
# We use set_custom_getter because it avoids recursively calling up the
# variable_scope tree. We've done the tree traversal ourselves and have added
# the request to each Layer which needs it.
layer._scope.set_custom_getter(layer._custom_getter)
layer._deferred_restorations.append(deferred_restoration)
if isinstance(layer, Network):
for sublayer in layer.layers:
if not isinstance(sublayer, Network):
layer._set_scope_for_nonnetwork_sublayer(sublayer)
_add_deferred_restoration(sublayer, deferred_restoration)
def _restore_existing_variables(network, save_path, map_func, user_map_func):
"""Use a standard Saver to restore existing variables from a checkpoint.
Args:
network: A Network object to restore.
save_path: The checkpoint prefix or directory to read from.
map_func: The function to use when mapping from variable names to checkpoint
names.
user_map_func: The original map_func passed by the user, for error checking.
Returns:
A dictionary mapping from checkpoint names to variable objects which have
been restored (for bookkeeping to avoid deferred restorations on these
variables).
Raises:
ValueError: If there is a name collision.
"""
existing_variables_by_checkpoint_name = {}
for variable in network.variables:
checkpoint_name = map_func(variable._shared_name)
if existing_variables_by_checkpoint_name.setdefault(
checkpoint_name, variable) is not variable:
if user_map_func is None:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
else:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=network.name,
network_scope_name=network.scope_name))
if existing_variables_by_checkpoint_name:
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
sess=sess, save_path=save_path)
return existing_variables_by_checkpoint_name
def _set_restore_on_create(network, save_path, map_func, user_map_func,
existing_variables_by_checkpoint_name):
"""If necessary, request deferred restorations of variables."""
checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
checkpointed_variables_to_restore = {}
for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
if checkpoint_name in existing_variables_by_checkpoint_name:
# This variable was already created and restored.
continue
# Save the variable for later restoration in a custom getter.
checkpointed_variables_to_restore[checkpoint_name] = (
checkpoint_reader.get_tensor(checkpoint_name))
# Only set a deferred restoration if there are checkpoint variables which
# have not been assigned to existing variables. Note that this loses out on
# some opportunity for error checking, but avoids creating
# _DeferredRestoration objects once a Network has been built (so that
# restoring in a loop does not take increasing amounts of memory).
if checkpointed_variables_to_restore:
if context.executing_eagerly():
sess = None
else:
sess = ops.get_default_session()
# We need a name for error messages. If we haven't been added to another
# Network yet, we're top-level.
network._finalize_name(False)
network._set_scope()
# Save a record of this restoration for use in the custom getter.
deferred_restoration = _DeferredRestoration(
map_func=map_func,
map_func_is_user=(user_map_func is not None),
checkpointed_variables_to_restore=checkpointed_variables_to_restore,
restored_variables={},
session=sess,
network_name=network.name,
network_scope_name=network.scope_name)
# Add the deferred registration to non-Network children, and request that
# Networks propagate the request to their children.
_add_deferred_restoration(network, deferred_restoration)
@deprecation.deprecated(
date=None,
instructions=(
"Please inherit from tf.keras.Model instead of tfe.Network, and use "
"tf.keras.Model.load_weights."))
def restore_network_checkpoint(network, save_path, map_func=None):
"""Restore the Network from a checkpoint.
If variables have already been created (typically when some or all of the
`Network` is built), they are assigned values from the checkpoint immediately,
overwriting any existing values (in graph mode the default session is used for
the assignments).
If there are checkpoint entries which do not correspond to any existing
variables in the `Network`, these values are saved for deferred restoration;
their initial values will be the checkpointed values once they are
created. Requests for multiple deferred restorations behave the same way as
immediate restorations, in that later requests will take priority over earlier
requests relevant to the same variable.
If this `Network` shares `Layer`s with another network, those `Layer`s will
also have their variables restored from the checkpoint.
Args:
network: A Network object to restore.
save_path: The return value of `tfe.save_network_checkpoint`, or a directory
to search for a checkpoint.
map_func: A function mapping fully qualified variable names (e.g.
'my_network_1/dense_1/kernel') to names in the checkpoint. By default (if
`map_func=None`), the variable prefix for the network being restored
(`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped and all
other variable names (shared with other Networks) are left unchanged. Note
that this is the _same_ map_func as `tfe.save_network_checkpoint`, not an
inverse mapping.
"""
network._finalize_name(parent_network=False)
network._set_scope() # scope_name should be available to map_funcs
if os.path.isdir(save_path):
    # If we were passed a directory, default to naming based on the Network
    # name.
save_path = os.path.join(save_path, network.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(network.scope_name)
# Step one is to restore any existing variables from the checkpoint.
existing_variables_by_checkpoint_name = _restore_existing_variables(
network=network,
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func)
# Step two is to set a custom getter which restores variables on creation,
# for those variables which have not been added to sub-Layers yet.
_set_restore_on_create(
network=network,
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func,
existing_variables_by_checkpoint_name=(
existing_variables_by_checkpoint_name))
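# Example usage (a minimal sketch; `net` is a Network and `prefix` is the value
# returned by save_network_checkpoint). Restoring before the Network has been
# called defers the assignments until the variables are actually created:
#
#   restore_network_checkpoint(net, prefix)
#   net(tf.zeros([1, 4]))  # variables created here pick up checkpointed values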
| {
"content_hash": "7554cbf5810dc697de52cd8ef2554304",
"timestamp": "",
"source": "github",
"line_count": 1013,
"max_line_length": 80,
"avg_line_length": 42.91806515301086,
"alnum_prop": 0.6677937252737143,
"repo_name": "ghchinoy/tensorflow",
"id": "363e2191c3dfd5cf19b938dbf5d13e2cd4e55cdf",
"size": "44165",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/eager/python/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
"""
This is an example dag for using the WinRMOperator.
"""
from __future__ import annotations
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
import os
from datetime import datetime, timedelta
from airflow import DAG
# --------------------------------------------------------------------------------
# Caveat: This DAG will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
try:
from airflow.operators.empty import EmptyOperator
except ModuleNotFoundError:
from airflow.operators.dummy import DummyOperator as EmptyOperator # type: ignore
from airflow.providers.microsoft.winrm.hooks.winrm import WinRMHook
from airflow.providers.microsoft.winrm.operators.winrm import WinRMOperator
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "POC_winrm_parallel"
with DAG(
dag_id=DAG_ID,
schedule="0 0 * * *",
start_date=datetime(2021, 1, 1),
dagrun_timeout=timedelta(minutes=60),
tags=["example"],
catchup=False,
) as dag:
run_this_last = EmptyOperator(task_id="run_this_last")
# [START create_hook]
winRMHook = WinRMHook(ssh_conn_id="ssh_POC1")
# [END create_hook]
# [START run_operator]
t1 = WinRMOperator(task_id="wintask1", command="ls -altr", winrm_hook=winRMHook)
t2 = WinRMOperator(task_id="wintask2", command="sleep 60", winrm_hook=winRMHook)
t3 = WinRMOperator(task_id="wintask3", command="echo 'luke test' ", winrm_hook=winRMHook)
# [END run_operator]
[t1, t2, t3] >> run_this_last
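    # The list fan-in above is shorthand for setting run_this_last downstream
    # of each task individually, i.e. roughly:
    #
    #   t1 >> run_this_last
    #   t2 >> run_this_last
    #   t3 >> run_this_last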
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "1d3b519e9b5a4a9534091a52cf00fd51",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 93,
"avg_line_length": 33.292307692307695,
"alnum_prop": 0.6058225508317929,
"repo_name": "apache/airflow",
"id": "85e64caed1d6f599a8a4fc452968202e40cdfe9e",
"size": "2951",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/system/providers/microsoft/winrm/example_winrm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
from sqlalchemy import text
from yuuhpizzakebab import db
def get_toppings():
result = db.engine.execute('select * from Topping')
toppings = []
for r in result:
toppings.append(Topping(r[0], r[1], r[2]))
return toppings
def get_topping(id):
t = text('select * from Topping where id = :topping_id')
topping_results = db.engine.execute(t, topping_id=id)
if not topping_results:
return None
for r in topping_results:
return Topping(r[0], r[1], r[2])
def delete_topping(id):
t = text('delete from Topping where id = :topping_id')
db.engine.execute(t, topping_id=id)
def save_topping(topping):
t = text('insert into Topping (name, price) values (:name, :price)')
db.engine.execute(t, name=topping.name, price=topping.price)
def update_topping(topping):
t = text(
'update Topping set name = :name, price = :price where id = :topping_id')
db.engine.execute(t,
name=topping.name,
price=topping.price,
topping_id=topping.id)
class Topping():
"""The topping class, which is included in pizzas.
variables:
id - id of the topping
name - name of the topping
price - price of the topping in USD
"""
def __init__(self, id, name, price):
self.id = id
self.name = name
self.price = price
@staticmethod
def get_all():
return get_toppings()
def price_without_dollar_sign(self):
return str(self.price)[1:]
@staticmethod
def get_by_id(id):
return get_topping(id)
def save(self):
if self.id:
update_topping(self)
return
save_topping(self)
@staticmethod
def delete_by_id(id):
delete_topping(id)
def delete(self):
if not self.id:
print 'can\'t delete without id t: topping'
return
delete_topping(self.id)
| {
"content_hash": "4def8a06b9a179864479205ac95e9877",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 81,
"avg_line_length": 22.802325581395348,
"alnum_prop": 0.5879653238143804,
"repo_name": "lex/yuuh-pizza-kebab",
"id": "931a80beabe13d7fe3527e03e9e23d587aea7a5e",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yuuhpizzakebab/topping/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "339"
},
{
"name": "HTML",
"bytes": "28687"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "PLpgSQL",
"bytes": "5640"
},
{
"name": "Python",
"bytes": "43701"
},
{
"name": "Shell",
"bytes": "856"
}
],
"symlink_target": ""
} |
"""Copies AWS EBS snapshots into AWS S3."""
import threading
import time
from typing import Any, Optional, Type, List
import boto3
from libcloudforensics.providers.aws import forensics
from libcloudforensics.providers.aws.internal import account
from libcloudforensics.errors import ResourceCreationError
from dftimewolf.lib import module
from dftimewolf.lib.containers import containers, interface
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.state import DFTimewolfState
INSTANCE_PROFILE_NAME = 'ebsCopy'
class AWSSnapshotS3CopyException(Exception):
"""Class to represent an exception in this collector.
Attributes:
message (str): The error message.
"""
def __init__(self,
message: str) -> None:
"""Initializes the Exception with provided message.
Args:
message (str): The error message.
"""
super().__init__(message)
self.message = message
class AWSSnapshotS3CopyCollector(module.ThreadAwareModule):
"""Copies AWS EBS snapshots into AWS S3.
Snapshot list can be passed in via SetUp parameters, or from an
AWSAttributeContainer from a previous module.
Attributes:
snapshots: The snapshots to copy.
bucket: The destination S3 bucket.
"""
def __init__(self,
state: DFTimewolfState,
name: Optional[str]=None,
critical: Optional[bool]=False) -> None:
"""Initializes a AWSVolumeToS3 collector."""
super(AWSSnapshotS3CopyCollector, self).__init__(
state, name=name, critical=critical)
self.bucket: str = ''
self.region: str = ''
self.subnet: Any = None
self.ec2: Any = None
self.s3: Any = None
self.iam_details: Any = None
self.aws_account = None
self.bucket_exists: bool = False
# pylint: disable=arguments-differ
def SetUp(self,
snapshots: Optional[str] = '',
bucket: str='',
region: str='',
subnet: Optional[str]=None) -> None:
"""Sets up the AWSVolumeToS3 collector.
Args:
      snapshots (str): Comma-separated list of snapshot IDs.
bucket (str): The destination s3 bucket.
region (str): The AWS region the snapshots are in.
subnet (str): The subnet to use for the copy instance. Required if there
is no default subnet.
"""
self.bucket = bucket
self.region = region
self.subnet = subnet
self.ec2 = boto3.client('ec2', region_name=self.region)
self.s3 = boto3.client('s3', region_name=self.region)
self.aws_account = account.AWSAccount(
self._PickAvailabilityZone(self.subnet))
if snapshots:
for snap in snapshots.split(','):
self.state.StoreContainer(containers.AWSSnapshot(snap))
# Check the bucket exists
self.bucket_exists = self._CheckBucketExists(self.bucket)
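  # Example: a minimal sketch of how SetUp might be called (the snapshot IDs,
  # bucket and region below are purely illustrative):
  #
  #   collector.SetUp(
  #       snapshots='snap-0123456789abcdef0,snap-0aaaabbbbccccdddd1',
  #       bucket='my-forensics-bucket',
  #       region='us-east-1')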
def PreProcess(self) -> None:
"""Set up for the snapshot copy operation."""
if not self.bucket_exists:
self.logger.info('Creating AWS bucket {0:s}'.format(self.bucket))
create_bucket_args = {'Bucket': self.bucket}
# us-east-1 is the default, but throws an error if actually specified.
if self.region != 'us-east-1':
        create_bucket_args['CreateBucketConfiguration'] = {
            'LocationConstraint': self.region}
self.s3.create_bucket(**create_bucket_args)
# Check the snapshots exist
    snap_ids = [snap.id for snap in
                self.state.GetContainers(containers.AWSSnapshot)]
if not self._CheckSnapshotsExist(snap_ids):
self.ModuleError(
'Could not find the snapshots ids to copy.',
critical=True)
# Create the IAM pieces
self.iam_details = forensics.CopyEBSSnapshotToS3SetUp(
self.aws_account, INSTANCE_PROFILE_NAME)
if self.iam_details['profile']['created']:
time.sleep(20) # Propagation delay
def Process(self, container: containers.AWSSnapshot) -> None:
"""Perform the copy of the snapshot to S3."""
    # AWS account objects are not thread-safe; create one per thread.
aws_account = account.AWSAccount(self._PickAvailabilityZone(self.subnet))
try:
result = forensics.CopyEBSSnapshotToS3Process(aws_account,
self.bucket,
container.id,
self.iam_details['profile']['arn'],
subnet_id=self.subnet)
self.state.StoreContainer(containers.AWSS3Object(result['image']))
for h in result['hashes']:
self.state.StoreContainer(containers.AWSS3Object(h))
except ResourceCreationError as exception:
self.ModuleError('Exception during copy operation: {0!s}'.
format(exception), critical=True)
def PostProcess(self) -> None:
"""Clean up afterwards."""
forensics.CopyEBSSnapshotToS3TearDown(
self.aws_account, INSTANCE_PROFILE_NAME, self.iam_details)
# pylint: disable=inconsistent-return-statements
def _PickAvailabilityZone(self, subnet: Optional[str]='') -> str:
"""Given a region + subnet, pick an availability zone.
    If the subnet is provided, its AZ is returned. Otherwise, one is picked
from those available in the region.
Args:
subnet (str): Optional. An EC2 subnet ID.
Returns:
A string representing the AZ.
Raises:
AWSSnapshotS3CopyException: If no suitable AZ can be found.
"""
# If we received a subnet ID, return the AZ for it
if subnet:
subnets = self.ec2.describe_subnets(SubnetIds=[subnet])
return str(subnets['Subnets'][0]['AvailabilityZone'])
# Otherwise, pick one.
response = self.ec2.describe_availability_zones(
Filters=[{'Name': 'region-name','Values': [self.region]}])
for zone in response['AvailabilityZones']:
if zone['State'] == 'available':
return str(zone['ZoneName'])
# If we reached here, we have a problem
raise AWSSnapshotS3CopyException('No suitable availability zone found')
def _CheckSnapshotsExist(self, snap_ids: List[str]) -> bool:
"""Check the snapshots that we want to copy exist.
Args:
snap_ids (List[str]): A list of snapshot IDs to look for.
Returns:
True if the snapshots all exist and we have permissions to list them,
False otherwise.
"""
try:
self.ec2.describe_snapshots(SnapshotIds=snap_ids)
except self.ec2.exceptions.ClientError:
return False
return True
def _CheckBucketExists(self, bucket_name: str) -> bool:
"""Checks whether a bucket exists in the configured AWS account.
Args:
bucket_name (str): The bucket name to look for.
Returns:
True if the bucket exists and we have permissions to confirm that, False
otherwise.
"""
buckets = [bucket['Name'] for bucket in self.s3.list_buckets()['Buckets']]
return bucket_name in buckets
@staticmethod
def GetThreadOnContainerType() -> Type[interface.AttributeContainer]:
return containers.AWSSnapshot
def GetThreadPoolSize(self) -> int:
return 10
modules_manager.ModulesManager.RegisterModule(AWSSnapshotS3CopyCollector)
| {
"content_hash": "5190e2fddb7f50385e3346f488829ab1",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 78,
"avg_line_length": 33.48557692307692,
"alnum_prop": 0.6811198851399857,
"repo_name": "Onager/dftimewolf",
"id": "cd2e88174776b9ce385a852346bf791eb2094295",
"size": "6984",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dftimewolf/lib/collectors/aws_snapshot_s3_copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "979863"
},
{
"name": "Shell",
"bytes": "11584"
}
],
"symlink_target": ""
} |
"""
Support for showing random numbers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.random/
"""
import asyncio
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_MINIMUM, CONF_MAXIMUM, CONF_UNIT_OF_MEASUREMENT)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Random Sensor'
DEFAULT_MIN = 0
DEFAULT_MAX = 20
ICON = 'mdi:hanger'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MAXIMUM, default=DEFAULT_MAX): cv.positive_int,
vol.Optional(CONF_MINIMUM, default=DEFAULT_MIN): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Random number sensor."""
name = config.get(CONF_NAME)
minimum = config.get(CONF_MINIMUM)
maximum = config.get(CONF_MAXIMUM)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
async_add_devices([RandomSensor(name, minimum, maximum, unit)], True)
return True
class RandomSensor(Entity):
"""Representation of a Random number sensor."""
def __init__(self, name, minimum, maximum, unit_of_measurement):
"""Initialize the sensor."""
self._name = name
self._minimum = minimum
self._maximum = maximum
self._unit_of_measurement = unit_of_measurement
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@asyncio.coroutine
def async_update(self):
"""Get a new number and updates the states."""
from random import randrange
self._state = randrange(self._minimum, self._maximum + 1)
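# Example configuration.yaml entry (a minimal sketch; the keys match the
# PLATFORM_SCHEMA above):
#
#   sensor:
#     - platform: random
#       name: Dice roll
#       minimum: 1
#       maximum: 6
#       unit_of_measurement: points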
| {
"content_hash": "4d3fab926109e9c1b0fd88bc065a51d1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 29.246913580246915,
"alnum_prop": 0.6821443647108485,
"repo_name": "ct-23/home-assistant",
"id": "b908324521886adcaf12b57036bbbc3452f28058",
"size": "2369",
"binary": false,
"copies": "12",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/random.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1686761"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7310847"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
} |
import urllib2
from threading import Thread,Lock
from Queue import Queue
import time
from BeautifulSoup import BeautifulSoup
class Fetcher:
def __init__(self,threads):
self.opener = urllib2.build_opener(urllib2.HTTPHandler)
        self.lock = Lock()  # thread lock
        self.q_req = Queue()  # request (task) queue
        self.q_ans = Queue()  # completed (response) queue
self.threads = threads
for i in range(threads):
t = Thread(target=self.threadget)
t.setDaemon(True)
t.start()
self.running = 0
    def __del__(self):  # wait for both queues to finish before teardown
time.sleep(0.5)
self.q_req.join()
self.q_ans.join()
def taskleft(self):
return self.q_req.qsize()+self.q_ans.qsize()+self.running
def push(self,req):
self.q_req.put(req)
def pop(self):
return self.q_ans.get()
def threadget(self):
while True:
req = self.q_req.get()
            with self.lock:  # keep this update atomic (critical section)
self.running += 1
try:
ans = self.opener.open(req).read()
# print ans
except Exception, what:
ans = ''
print what
self.q_ans.put((req,ans))
with self.lock:
self.running -= 1
self.q_req.task_done()
time.sleep(0.1) # don't spam
if __name__ == "__main__":
t0 = time.time()
links = [ 'http://www.verycd.com/topics/%d/'%i for i in range(5430,5460) ]
f = Fetcher(threads=10)
for url in links:
f.push(url)
while f.taskleft():
url,content = f.pop()
if len(content) > 0:
html = BeautifulSoup(content)
titleDiv = html.find('title')
if len(titleDiv) > 0:
print titleDiv.text
t1 = time.time()
print str(t1 - t0) | {
"content_hash": "9cfa601f2c0cc4163d4a035c03544059",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 26.797101449275363,
"alnum_prop": 0.5251487290427258,
"repo_name": "Guzi219/Python",
"id": "3d83c5c7b56a77757652b05cf017b7cd18646811",
"size": "1944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com/github/demo/MutipleThreadCrawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106740"
}
],
"symlink_target": ""
} |
"""
A script to export Flocker log files and system information.
"""
from gzip import open as gzip_open
import os
from platform import dist as platform_dist
import re
from shutil import copyfileobj, make_archive, rmtree
from socket import gethostname
from subprocess import check_call, check_output
from uuid import uuid1
from pyrsistent import PClass, field
from flocker import __version__
def gzip_file(source_path, archive_path):
"""
Create a gzip compressed archive of ``source_path`` at ``archive_path``.
An empty archive file will be created if the source file does not exist.
This gives the diagnostic archive a consistent set of files which can
easily be tested.
"""
with gzip_open(archive_path, 'wb') as archive:
if os.path.isfile(source_path):
with open(source_path, 'rb') as source:
copyfileobj(source, archive)
class FlockerDebugArchive(object):
"""
Create a tar archive containing:
* Flocker version,
* logs from all installed Flocker services,
* some or all of the syslog depending on the logging system,
* Docker version and configuration information, and
* a list of all the services installed on the system and their status.
"""
def __init__(self, service_manager, log_exporter):
"""
:param service_manager: An API for listing installed services.
:param log_exporter: An API for exporting logs for services.
"""
self._service_manager = service_manager
self._log_exporter = log_exporter
self._suffix = unicode(uuid1())
self._archive_name = "clusterhq_flocker_logs_{}".format(
self._suffix
)
self._archive_path = os.path.abspath(self._archive_name)
def _logfile_path(self, name):
"""
Generate a path to a file inside the archive directory.
:param str name: A unique label for the file.
:returns: An absolute path string for a file inside the archive
directory.
"""
return os.path.join(
self._archive_name,
name,
)
def _open_logfile(self, name):
"""
:param str name: A unique label for the file.
:return: An open ``file`` object with a name generated by
`_logfile_path`.
"""
return open(self._logfile_path(name), 'w')
def create(self):
"""
Create the archive by first creating a uniquely named directory in the
current working directory, adding the log files and debug information,
creating a ``tar`` archive from the directory and finally removing the
directory.
"""
os.makedirs(self._archive_path)
try:
# Flocker version
with self._open_logfile('flocker-version') as output:
output.write(__version__.encode('utf-8') + b'\n')
# Flocker logs.
services = self._service_manager.flocker_services()
for service_name, service_status in services:
self._log_exporter.export_flocker(
service_name=service_name,
target_path=self._logfile_path(service_name)
)
# Syslog.
self._log_exporter.export_all(self._logfile_path('syslog'))
# Status of all services.
with self._open_logfile('service-status') as output:
services = self._service_manager.all_services()
for service_name, service_status in services:
output.write(service_name + " " + service_status + "\n")
# Docker version
check_call(
['docker', 'version'],
stdout=self._open_logfile('docker-version')
)
# Docker configuration
check_call(
['docker', 'info'],
stdout=self._open_logfile('docker-info')
)
# Kernel version
self._open_logfile('uname').write(' '.join(os.uname()))
# Distribution version
self._open_logfile('os-release').write(
open('/etc/os-release').read()
)
# Network configuration
check_call(
['ip', 'addr'],
stdout=self._open_logfile('ip-addr')
)
# Hostname
self._open_logfile('hostname').write(gethostname() + '\n')
# Partition information
check_call(
['fdisk', '-l'],
stdout=self._open_logfile('fdisk')
)
# Block Device and filesystem information
check_call(
['lsblk', '--all'],
stdout=self._open_logfile('lsblk')
)
# Create a single archive file
archive_path = make_archive(
base_name=self._archive_name,
format='tar',
root_dir=os.path.dirname(self._archive_path),
base_dir=os.path.basename(self._archive_path),
)
finally:
# Attempt to remove the source directory.
rmtree(self._archive_path)
return archive_path
class SystemdServiceManager(object):
"""
List services managed by Systemd.
"""
def all_services(self):
"""
Iterate the name and status of all services known to SystemD.
"""
output = check_output(['systemctl', 'list-unit-files', '--no-legend'])
for line in output.splitlines():
line = line.rstrip()
service_name, service_status = line.split(None, 1)
yield service_name, service_status
def flocker_services(self):
"""
Iterate the name and status of the Flocker services known to SystemD.
"""
service_pattern = r'^(?P<service_name>flocker-.+)\.service'
for service_name, service_status in self.all_services():
match = re.match(service_pattern, service_name)
if match:
service_name = match.group('service_name')
if service_status == 'enabled':
yield service_name, service_status
class UpstartServiceManager(object):
"""
List services managed by Upstart.
"""
def all_services(self):
"""
Iterate the name and status of all services known to Upstart.
"""
for line in check_output(['initctl', 'list']).splitlines():
service_name, service_status = line.split(None, 1)
yield service_name, service_status
def flocker_services(self):
"""
Iterate the name and status of the Flocker services known to Upstart.
"""
for service_name, service_status in self.all_services():
if service_name.startswith('flocker-'):
yield service_name, service_status
class JournaldLogExporter(object):
"""
Export logs managed by JournalD.
"""
def export_flocker(self, service_name, target_path):
"""
Export logs for ``service_name`` to ``target_path`` compressed using
``gzip``.
"""
# Centos-7 doesn't have separate startup logs.
open(target_path + '_startup.gz', 'w').close()
check_call(
'journalctl --all --output cat --unit {}.service '
'| gzip'.format(service_name),
stdout=open(target_path + '_eliot.gz', 'w'),
shell=True
)
def export_all(self, target_path):
"""
Export all system logs to ``target_path`` compressed using ``gzip``.
"""
check_call(
'journalctl --all --boot | gzip',
stdout=open(target_path + '.gz', 'w'),
shell=True
)
class UpstartLogExporter(object):
"""
Export logs for services managed by Upstart and written by RSyslog.
"""
def export_flocker(self, service_name, target_path):
"""
Export logs for ``service_name`` to ``target_path`` compressed using
``gzip``.
"""
files = [
("/var/log/upstart/{}.log".format(service_name),
target_path + '_startup.gz'),
("/var/log/flocker/{}.log".format(service_name),
target_path + '_eliot.gz'),
]
for source_path, archive_path in files:
gzip_file(source_path, archive_path)
def export_all(self, target_path):
"""
Export all system logs to ``target_path`` compressed using ``gzip``.
"""
gzip_file('/var/log/syslog', target_path + '.gz')
class Distribution(PClass):
"""
A record of the service manager and log exported to be used on each
supported Linux distribution.
:ivar str name: The name of the operating system.
:ivar str version: The version of the operating system.
:ivar service_manager: The service manager API to use for this
operating system.
:ivar log_exporter: The log exporter API to use for this operating
system.
"""
name = field(type=unicode, mandatory=True)
version = field(type=unicode, mandatory=True)
service_manager = field(mandatory=True)
log_exporter = field(mandatory=True)
DISTRIBUTIONS = (
Distribution(
name=u'centos',
version=u'7',
service_manager=SystemdServiceManager,
log_exporter=JournaldLogExporter,
),
Distribution(
name=u'ubuntu',
version=u'14.04',
service_manager=UpstartServiceManager,
log_exporter=UpstartLogExporter,
)
)
DISTRIBUTION_BY_LABEL = dict(
('{}-{}'.format(p.name, p.version), p)
for p in DISTRIBUTIONS
)
def current_distribution():
"""
:returns: A ``str`` label for the operating system distribution running
this script.
"""
name, version, nickname = platform_dist()
return name.lower() + '-' + version
def lookup_distribution(distribution_label):
"""
:param str distribution_label: The label of the distribution to lookup.
:returns: A ``Distribution`` matching the supplied ``distribution_label``
        or ``None`` if the ``distribution_label`` is not supported.
"""
for label, distribution in DISTRIBUTION_BY_LABEL.items():
if distribution_label.startswith(label):
return distribution
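# Example usage (a minimal sketch; assumes a supported distribution and enough
# privileges for the various CLI tools the archive shells out to):
#
#   distribution = lookup_distribution(current_distribution())
#   archive_path = FlockerDebugArchive(
#       service_manager=distribution.service_manager(),
#       log_exporter=distribution.log_exporter(),
#   ).create()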
| {
"content_hash": "9d8fa5db72db8d928d1014d343b64557",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 78,
"avg_line_length": 32.65408805031446,
"alnum_prop": 0.5792565485362096,
"repo_name": "wallnerryan/flocker-profiles",
"id": "0170ef9f96b4ced6aa087011548c305bf5d648ab",
"size": "10443",
"binary": false,
"copies": "8",
"ref": "refs/heads/profile_metadata",
"path": "flocker/node/_diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2715629"
},
{
"name": "Ruby",
"bytes": "6401"
},
{
"name": "Shell",
"bytes": "3469"
}
],
"symlink_target": ""
} |
'''Recipes for more efficient work with linalg using classes
intended for use for multivariate normal and linear regression
calculations
x is the data (nobs, nvars)
m is the moment matrix (x'x) or a covariance matrix Sigma
examples:
x'sigma^{-1}x
z = Px where P=Sigma^{-1/2} or P=Sigma^{1/2}
Initially assume positive definite, then add spectral cutoff and
regularization of moment matrix, and extend to PCA
maybe extend to sparse if some examples work out
(transformation matrix P for random effect and for toeplitz)
Author: josef-pktd
Created on 2010-10-20
'''
import numpy as np
from scipy import linalg
#this has been copied from nitime a long time ago
#TODO: ceck whether class has changed in nitime
class OneTimeProperty(object):
"""A descriptor to make special properties that become normal attributes.
This is meant to be used mostly by the auto_attr decorator in this module.
Author: Fernando Perez, copied from nitime
"""
def __init__(self,func):
"""Create a OneTimeProperty instance.
Parameters
----------
func : method
The method that will be called the first time to compute a value.
Afterwards, the method's name will be a standard attribute holding
the value of this computation.
"""
self.getter = func
self.name = func.func_name
def __get__(self,obj,type=None):
"""This will be called on attribute access on the class or instance. """
if obj is None:
# Being called on the class, return the original function. This way,
# introspection works on the class.
#return func
print 'class access'
return self.getter
val = self.getter(obj)
#print "** auto_attr - loading '%s'" % self.name # dbg
setattr(obj, self.name, val)
return val
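# Example: a minimal sketch of the descriptor in use. The first access calls
# the method and caches the result as an ordinary instance attribute, so later
# accesses skip the computation:
#
#   class Demo(object):
#       @OneTimeProperty
#       def expensive(self):
#           print 'computing'
#           return 42
#
#   d = Demo()
#   d.expensive   # prints 'computing', returns 42
#   d.expensive   # returns the cached 42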
class PlainMatrixArray(object):
'''Class that defines linalg operation on an array
simplest version as benchmark
linear algebra recipes for multivariate normal and linear
regression calculations
'''
def __init__(self, data=None, sym=None):
        if data is not None:
if sym is None:
self.x = np.asarray(data)
self.m = np.dot(self.x.T, self.x)
else:
raise ValueError('data and sym cannot be both given')
        elif sym is not None:
self.m = np.asarray(sym)
self.x = np.eye(*self.m.shape) #default
else:
raise ValueError('either data or sym need to be given')
@OneTimeProperty
def minv(self):
return np.linalg.inv(self.m)
    def m_y(self, y):
        return np.dot(self.m, y)
def minv_y(self, y):
return np.dot(self.minv, y)
@OneTimeProperty
def mpinv(self):
return linalg.pinv(self.m)
@OneTimeProperty
def xpinv(self):
return linalg.pinv(self.x)
def yt_m_y(self, y):
return np.dot(y.T, np.dot(self.m, y))
def yt_minv_y(self, y):
return np.dot(y.T, np.dot(self.minv, y))
#next two are redundant
def y_m_yt(self, y):
return np.dot(y, np.dot(self.m, y.T))
def y_minv_yt(self, y):
return np.dot(y, np.dot(self.minv, y.T))
@OneTimeProperty
def mdet(self):
return linalg.det(self.m)
@OneTimeProperty
def mlogdet(self):
return np.log(linalg.det(self.m))
@OneTimeProperty
def meigh(self):
evals, evecs = linalg.eigh(self.m)
sortind = np.argsort(evals)[::-1]
return evals[sortind], evecs[:,sortind]
@OneTimeProperty
def mhalf(self):
evals, evecs = self.meigh
return np.dot(np.diag(evals**0.5), evecs.T)
#return np.dot(evecs, np.dot(np.diag(evals**0.5), evecs.T))
#return np.dot(evecs, 1./np.sqrt(evals) * evecs.T))
@OneTimeProperty
def minvhalf(self):
evals, evecs = self.meigh
return np.dot(evecs, 1./np.sqrt(evals) * evecs.T)
class SvdArray(PlainMatrixArray):
'''Class that defines linalg operation on an array
svd version, where svd is taken on original data array, if
or when it matters
no spectral cutoff in first version
'''
def __init__(self, data=None, sym=None):
super(SvdArray, self).__init__(data=data, sym=sym)
u, s, v = np.linalg.svd(self.x, full_matrices=1)
self.u, self.s, self.v = u, s, v
        self.sdiag = linalg.diagsvd(s, *self.x.shape)
        self.sinvdiag = linalg.diagsvd(1./s, *self.x.shape)
    def _sdiagpow(self, p):
        return linalg.diagsvd(np.power(self.s, p), *self.x.shape)
@OneTimeProperty
def minv(self):
sinvv = np.dot(self.sinvdiag, self.v)
return np.dot(sinvv.T, sinvv)
@OneTimeProperty
def meigh(self):
evecs = self.v.T
evals = self.s**2
return evals, evecs
@OneTimeProperty
def mdet(self):
return self.meigh[0].prod()
@OneTimeProperty
def mlogdet(self):
return np.log(self.meigh[0]).sum()
@OneTimeProperty
def mhalf(self):
return np.dot(np.diag(self.s), self.v)
@OneTimeProperty
def xxthalf(self):
return np.dot(self.u, self.sdiag)
@OneTimeProperty
def xxtinvhalf(self):
return np.dot(self.u, self.sinvdiag)
class CholArray(PlainMatrixArray):
'''Class that defines linalg operation on an array
    cholesky version, where a cholesky factorization of the moment
    matrix is used if or when it matters
plan: use cholesky factor and cholesky solve
nothing implemented yet
'''
def __init__(self, data=None, sym=None):
        super(CholArray, self).__init__(data=data, sym=sym)
def yt_minv_y(self, y):
        '''y' Sigma^{-1} y
        does not use a stored cholesky factor yet
        '''
        return np.dot(y, linalg.cho_solve(linalg.cho_factor(self.m), y))
#same as
#lower = False #if cholesky(sigma) is used, default is upper
#np.dot(x,linalg.cho_solve((self.cholsigma, lower),x))
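        #illustrative check (added), assuming self.m is positive definite:
        #  c = linalg.cho_factor(self.m)
        #  maxabs(linalg.cho_solve(c, y) - np.dot(np.linalg.inv(self.m), y))  #~1e-15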
def testcompare(m1, m2):
from numpy.testing import assert_almost_equal, assert_approx_equal
decimal = 12
#inv
assert_almost_equal(m1.minv, m2.minv, decimal=decimal)
#matrix half and invhalf
#fix sign in test, should this be standardized
s1 = np.sign(m1.mhalf.sum(1))[:,None]
s2 = np.sign(m2.mhalf.sum(1))[:,None]
scorr = s1/s2
assert_almost_equal(m1.mhalf, m2.mhalf * scorr, decimal=decimal)
assert_almost_equal(m1.minvhalf, m2.minvhalf, decimal=decimal)
#eigenvalues, eigenvectors
evals1, evecs1 = m1.meigh
evals2, evecs2 = m2.meigh
assert_almost_equal(evals1, evals2, decimal=decimal)
#normalization can be different: evecs in columns
s1 = np.sign(evecs1.sum(0))
s2 = np.sign(evecs2.sum(0))
scorr = s1/s2
assert_almost_equal(evecs1, evecs2 * scorr, decimal=decimal)
#determinant
assert_approx_equal(m1.mdet, m2.mdet, significant=13)
assert_approx_equal(m1.mlogdet, m2.mlogdet, significant=13)
####### helper function for interactive work
def tiny2zero(x, eps = 1e-15):
'''replace abs values smaller than eps by zero, makes copy
'''
    x = x.copy()
    x[np.abs(x) < eps] = 0
    return x
def maxabs(x):
return np.max(np.abs(x))
if __name__ == '__main__':
n = 5
y = np.arange(n)
x = np.random.randn(100,n)
autocov = 2*0.8**np.arange(n) +0.01 * np.random.randn(n)
sigma = linalg.toeplitz(autocov)
mat = PlainMatrixArray(sym=sigma)
print tiny2zero(mat.mhalf)
mih = mat.minvhalf
print tiny2zero(mih) #for nicer printing
mat2 = PlainMatrixArray(data=x)
print maxabs(mat2.yt_minv_y(np.dot(x.T, x)) - mat2.m)
print tiny2zero(mat2.minv_y(mat2.m))
mat3 = SvdArray(data=x)
print mat3.meigh[0]
print mat2.meigh[0]
testcompare(mat2, mat3)
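    #added illustration (not in the original script): whitening as in the
    #module docstring, z = P x with P = Sigma^{-1/2}; rows of x are iid
    #N(0, I), so xc = x.dot(mhalf) has covariance sigma, and whitening xc
    #with minvhalf should give a sample covariance close to the identity
    xc = np.dot(x, mat.mhalf)
    xw = np.dot(xc, mat.minvhalf)
    print maxabs(np.dot(xw.T, xw)/100. - np.eye(n))  #sampling noise only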
'''
m = np.dot(x.T, x)
u,s,v = np.linalg.svd(x, full_matrices=1)
Sig = linalg.diagsvd(s,*x.shape)
>>> np.max(np.abs(np.dot(u, np.dot(Sig, v)) - x))
3.1086244689504383e-015
>>> np.max(np.abs(np.dot(u.T, u) - np.eye(100)))
3.3306690738754696e-016
>>> np.max(np.abs(np.dot(v.T, v) - np.eye(5)))
6.6613381477509392e-016
>>> np.max(np.abs(np.dot(Sig.T, Sig) - np.diag(s**2)))
5.6843418860808015e-014
>>> evals,evecs = linalg.eigh(np.dot(x.T, x))
>>> evals[::-1]
array([ 123.36404464, 112.17036442, 102.04198468, 76.60832278,
74.70484487])
>>> s**2
array([ 123.36404464, 112.17036442, 102.04198468, 76.60832278,
74.70484487])
>>> np.max(np.abs(np.dot(v.T, np.dot(np.diag(s**2), v)) - m))
1.1368683772161603e-013
>>> us = np.dot(u, Sig)
>>> np.max(np.abs(np.dot(us, us.T) - np.dot(x, x.T)))
1.0658141036401503e-014
>>> sv = np.dot(Sig, v)
>>> np.max(np.abs(np.dot(sv.T, sv) - np.dot(x.T, x)))
1.1368683772161603e-013
'''
| {
"content_hash": "2b24c08916b8b15298da87bb2b6220fe",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 80,
"avg_line_length": 26.864864864864863,
"alnum_prop": 0.6134585289514867,
"repo_name": "wesm/statsmodels",
"id": "ff1b7bc3395a905beb77ca279455757614c9c403",
"size": "8946",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scikits/statsmodels/sandbox/archive/linalg_decomp_1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "Python",
"bytes": "3470843"
},
{
"name": "R",
"bytes": "2168"
}
],
"symlink_target": ""
} |
import json
import os
from flask import current_app as app
from werkzeug.utils import secure_filename
from app import get_db, s3_client
@app.context_processor
def get_furnitures():
def _get_furnitures():
return list(get_db().furnitures.find({}, {"_id": False}))
return dict(get_furnitures=_get_furnitures)
@app.context_processor
def get_events():
def _get_events():
return list(get_db().events.find({}, {"_id": False}))
return dict(get_events=_get_events)
# Jinja Filters
@app.template_filter("to_jobs")
def to_jobs(company_id):
jobs = list(get_db().jobs.find({"company_id": company_id}))
for j in jobs:
j['_id'] = str(j['_id'])
return list(jobs)
@app.template_filter("to_furniture")
def to_furniture(furniture_id):
return get_db().furnitures.find_one({"id": furniture_id})
@app.template_filter("to_days")
def to_days(duration):
opts = {"wed": "Mercredi", "thu": "Jeudi", "both": "Mercredi & Jeudi"}
return opts[duration]
@app.template_filter("to_size")
def to_size(size):
return int(size) if float(size).is_integer() else float(size)
@app.template_filter("to_human")
def to_human(num):
return str(num)
@app.template_filter("to_com_url")
def to_com_url(oid, comp_name, ext, type):
try:
filename = secure_filename(f'{comp_name}_{type.upper()}')
cdr = f'inline; filename={filename}.{ext}'
url = s3_client.generate_presigned_url(
'get_object',
Params={'ResponseContentDisposition': cdr, 'Bucket': os.environ.get('BUCKET_NAME'), 'Key': f'com_files/{type}/{oid}.{ext}'}
)
return url
except Exception as e:
print("error in getting com url", e)
return "#"
@app.template_filter("nb_dishes")
def nb_dishes(size):
if size == 4.5:
return 0
elif 9 <= size <= 12:
return 2
elif 12 < size <= 18:
return 4
elif size > 18:
return 6
@app.template_filter("empty_furnitures")
def empty_furniture(f):
return sum(f.values()) == 0
@app.template_filter("empty_events")
def empty_events(e):
return any(e.values())
@app.template_filter("empty_dishes")
def empty_dishes(d):
return sum([sum(a.values()) for a in d.values()]) == 0
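# Illustrative sketch (added, not part of the original module): how these
# filters might be used from a Jinja template; the variable names and the
# file extension below are assumptions made for the example.
#
#   {{ job.company_id | to_jobs }}
#   {{ furniture_id | to_furniture }}
#   {{ company._id | to_com_url(company.name, 'pdf', 'brochure') }}
#   {{ stand.size | to_size }} m2, {{ stand.duration | to_days }}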
| {
"content_hash": "27c5dc42d3d8384c6e8e75f9ab21d8b9",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 135,
"avg_line_length": 23.694736842105264,
"alnum_prop": 0.6246112838738338,
"repo_name": "ForumOrganisation/forumorg",
"id": "01d692a856a930b9e1b364f706e046c80c47eede",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/companies/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "570943"
},
{
"name": "Dockerfile",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "645487"
},
{
"name": "JavaScript",
"bytes": "310986"
},
{
"name": "Python",
"bytes": "91339"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class HypervisorViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('extension_supported',
'hypervisor_list',
'hypervisor_stats',
'service_list')})
def test_index(self):
hypervisors = self.hypervisors.list()
services = self.services.list()
stats = self.hypervisors.stats
api.nova.extension_supported('AdminActions',
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:hypervisors:index'))
self.assertTemplateUsed(res, 'admin/hypervisors/index.html')
hypervisors_tab = res.context['tab_group'].get_tab('hypervisor')
self.assertItemsEqual(hypervisors_tab._tables['hypervisors'].data,
hypervisors)
host_tab = res.context['tab_group'].get_tab('compute_host')
host_table = host_tab._tables['compute_host']
compute_services = [service for service in services
if service.binary == 'nova-compute']
self.assertItemsEqual(host_table.data, compute_services)
actions_host_up = host_table.get_row_actions(host_table.data[0])
self.assertEqual(1, len(actions_host_up))
actions_host_down = host_table.get_row_actions(host_table.data[1])
self.assertEqual(2, len(actions_host_down))
self.assertEqual('evacuate', actions_host_down[0].name)
@test.create_stubs({api.nova: ('hypervisor_list',
'hypervisor_stats',
'service_list')})
def test_service_list_unavailable(self):
"""test that error message should be returned when
nova.service_list isn't available
"""
hypervisors = self.hypervisors.list()
stats = self.hypervisors.stats
api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
api.nova.service_list(IsA(http.HttpRequest)).AndRaise(
self.exceptions.nova)
self.mox.ReplayAll()
resp = self.client.get(reverse('horizon:admin:hypervisors:index'))
self.assertMessageCount(resp, error=1, warning=0)
class HypervisorDetailViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('hypervisor_search',)})
def test_index(self):
hypervisor = self.hypervisors.list().pop().hypervisor_hostname
api.nova.hypervisor_search(
IsA(http.HttpRequest), hypervisor).AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:admin:hypervisors:detail', args=[hypervisor])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/hypervisors/detail.html')
self.assertItemsEqual(res.context['table'].data, [])
| {
"content_hash": "3b8c7d327360d036ae839eaaeedc9c45",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 45.851351351351354,
"alnum_prop": 0.632773356911288,
"repo_name": "nvoron23/avos",
"id": "a854405fbf25999264744f19384f77b37cba0a07",
"size": "4001",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/hypervisors/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1407"
},
{
"name": "CSS",
"bytes": "85008"
},
{
"name": "Gettext Catalog",
"bytes": "10294282"
},
{
"name": "HTML",
"bytes": "457426"
},
{
"name": "JavaScript",
"bytes": "904618"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4318649"
},
{
"name": "Scala",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "17503"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from wtforms.compat import text_type
from wtforms.validators import (
StopValidation, ValidationError, email, equal_to,
ip_address, length, required, optional, regexp,
url, NumberRange, AnyOf, NoneOf, mac_address, UUID,
input_required, data_required
)
from functools import partial
from tests.common import DummyField, grab_error_message, grab_stop_message
class DummyForm(dict):
pass
class ValidatorsTest(TestCase):
def setUp(self):
self.form = DummyForm()
def test_email(self):
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertRaises(ValidationError, email(), self.form, DummyField(None))
self.assertRaises(ValidationError, email(), self.form, DummyField(''))
self.assertRaises(ValidationError, email(), self.form, DummyField(' '))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo'))
self.assertRaises(ValidationError, email(), self.form, DummyField('bar.dk'))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo@'))
self.assertRaises(ValidationError, email(), self.form, DummyField('@bar.dk'))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo@bar'))
self.assertRaises(ValidationError, email(), self.form, DummyField('[email protected]'))
self.assertRaises(ValidationError, email(), self.form, DummyField('[email protected]'))
# Test IDNA domains
self.assertEqual(email()(self.form, DummyField(u'foo@bücher.中国')), None)
def test_equal_to(self):
self.form['foo'] = DummyField('test')
self.assertEqual(equal_to('foo')(self.form, self.form['foo']), None)
self.assertRaises(ValidationError, equal_to('invalid_field_name'), self.form, DummyField('test'))
self.assertRaises(ValidationError, equal_to('foo'), self.form, DummyField('different_value'))
def test_ip_address(self):
self.assertEqual(ip_address()(self.form, DummyField('127.0.0.1')), None)
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('abc.0.0.1'))
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('1278.0.0.1'))
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('127.0.0.abc'))
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('900.200.100.75'))
for bad_address in ('abc.0.0.1', 'abcd:1234::123::1', '1:2:3:4:5:6:7:8:9', 'abcd::1ffff'):
self.assertRaises(ValidationError, ip_address(ipv6=True), self.form, DummyField(bad_address))
for good_address in ('::1', 'dead:beef:0:0:0:0:42:1', 'abcd:ef::42:1'):
self.assertEqual(ip_address(ipv6=True)(self.form, DummyField(good_address)), None)
# Test ValueError on ipv6=False and ipv4=False
self.assertRaises(ValueError, ip_address, ipv4=False, ipv6=False)
def test_mac_address(self):
self.assertEqual(mac_address()(self.form,
DummyField('01:23:45:67:ab:CD')), None)
check_fail = partial(
self.assertRaises, ValidationError,
mac_address(), self.form
)
check_fail(DummyField('00:00:00:00:00'))
check_fail(DummyField('01:23:45:67:89:'))
check_fail(DummyField('01:23:45:67:89:gh'))
check_fail(DummyField('123:23:45:67:89:00'))
def test_uuid(self):
self.assertEqual(
UUID()(self.form, DummyField('2bc1c94f-0deb-43e9-92a1-4775189ec9f8')),
None
)
self.assertRaises(ValidationError, UUID(), self.form,
DummyField('2bc1c94f-deb-43e9-92a1-4775189ec9f8'))
self.assertRaises(ValidationError, UUID(), self.form,
DummyField('2bc1c94f-0deb-43e9-92a1-4775189ec9f'))
self.assertRaises(ValidationError, UUID(), self.form,
DummyField('gbc1c94f-0deb-43e9-92a1-4775189ec9f8'))
self.assertRaises(ValidationError, UUID(), self.form,
DummyField('2bc1c94f 0deb-43e9-92a1-4775189ec9f8'))
def test_length(self):
field = DummyField('foobar')
self.assertEqual(length(min=2, max=6)(self.form, field), None)
self.assertRaises(ValidationError, length(min=7), self.form, field)
self.assertEqual(length(min=6)(self.form, field), None)
self.assertRaises(ValidationError, length(max=5), self.form, field)
self.assertEqual(length(max=6)(self.form, field), None)
self.assertRaises(AssertionError, length)
self.assertRaises(AssertionError, length, min=5, max=2)
# Test new formatting features
grab = lambda **k: grab_error_message(length(**k), self.form, field)
self.assertEqual(grab(min=2, max=5, message='%(min)d and %(max)d'), '2 and 5')
self.assertTrue('at least 8' in grab(min=8))
self.assertTrue('longer than 5' in grab(max=5))
self.assertTrue('between 2 and 5' in grab(min=2, max=5))
def test_required(self):
self.assertEqual(required()(self.form, DummyField('foobar')), None)
self.assertRaises(StopValidation, required(), self.form, DummyField(''))
def test_data_required(self):
# Make sure we stop the validation chain
self.assertEqual(data_required()(self.form, DummyField('foobar')), None)
self.assertRaises(StopValidation, data_required(), self.form, DummyField(''))
self.assertRaises(StopValidation, data_required(), self.form, DummyField(' '))
self.assertEqual(data_required().field_flags, ('required', ))
# Make sure we clobber errors
f = DummyField('', ['Invalid Integer Value'])
self.assertEqual(len(f.errors), 1)
self.assertRaises(StopValidation, data_required(), self.form, f)
self.assertEqual(len(f.errors), 0)
# Check message and custom message
grab = lambda **k: grab_stop_message(data_required(**k), self.form, DummyField(''))
self.assertEqual(grab(), 'This field is required.')
self.assertEqual(grab(message='foo'), 'foo')
def test_input_required(self):
self.assertEqual(input_required()(self.form, DummyField('foobar', raw_data=['foobar'])), None)
self.assertRaises(StopValidation, input_required(), self.form, DummyField('', raw_data=['']))
self.assertEqual(input_required().field_flags, ('required', ))
# Check message and custom message
grab = lambda **k: grab_stop_message(input_required(**k), self.form, DummyField('', raw_data=['']))
self.assertEqual(grab(), 'This field is required.')
self.assertEqual(grab(message='foo'), 'foo')
def test_optional(self):
self.assertEqual(optional()(self.form, DummyField('foobar', raw_data=['foobar'])), None)
self.assertRaises(StopValidation, optional(), self.form, DummyField('', raw_data=['']))
self.assertEqual(optional().field_flags, ('optional', ))
f = DummyField('', ['Invalid Integer Value'], raw_data=[''])
self.assertEqual(len(f.errors), 1)
self.assertRaises(StopValidation, optional(), self.form, f)
self.assertEqual(len(f.errors), 0)
# Test for whitespace behavior.
whitespace_field = DummyField(' ', raw_data=[' '])
self.assertRaises(StopValidation, optional(), self.form, whitespace_field)
self.assertEqual(optional(strip_whitespace=False)(self.form, whitespace_field), None)
def test_regexp(self):
import re
# String regexp
self.assertEqual(regexp('^a')(self.form, DummyField('abcd')).group(0), 'a')
self.assertEqual(regexp('^a', re.I)(self.form, DummyField('ABcd')).group(0), 'A')
self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField('foo'))
self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField(None))
# Compiled regexp
self.assertEqual(regexp(re.compile('^a'))(self.form, DummyField('abcd')).group(0), 'a')
self.assertEqual(regexp(re.compile('^a', re.I))(self.form, DummyField('ABcd')).group(0), 'A')
self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField('foo'))
self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField(None))
# Check custom message
self.assertEqual(grab_error_message(regexp('^a', message='foo'), self.form, DummyField('f')), 'foo')
def test_url(self):
self.assertEqual(url()(self.form, DummyField('http://foobar.dk')), None)
self.assertEqual(url()(self.form, DummyField('http://foobar.dk/')), None)
self.assertEqual(url()(self.form, DummyField('http://foobar.museum/foobar')), None)
self.assertEqual(url()(self.form, DummyField('http://127.0.0.1/foobar')), None)
self.assertEqual(url()(self.form, DummyField('http://127.0.0.1:9000/fake')), None)
self.assertEqual(url(require_tld=False)(self.form, DummyField('http://localhost/foobar')), None)
self.assertEqual(url(require_tld=False)(self.form, DummyField('http://foobar')), None)
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar'))
self.assertRaises(ValidationError, url(), self.form, DummyField('foobar.dk'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://127.0.0/asdf'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.d'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.12'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://localhost:abc/a'))
# Test IDNA
IDNA_TESTS = (
u'http://\u0645\u062b\u0627\u0644.\u0625\u062e\u062a\u0628\u0627\u0631/foo.com', # Arabic test
u'http://उदाहरण.परीक्षा/', # Hindi test
u'http://실례.테스트', # Hangul test
)
for s in IDNA_TESTS:
self.assertEqual(url()(self.form, DummyField(s)), None)
def test_number_range(self):
v = NumberRange(min=5, max=10)
self.assertEqual(v(self.form, DummyField(7)), None)
self.assertRaises(ValidationError, v, self.form, DummyField(None))
self.assertRaises(ValidationError, v, self.form, DummyField(0))
self.assertRaises(ValidationError, v, self.form, DummyField(12))
self.assertRaises(ValidationError, v, self.form, DummyField(-5))
onlymin = NumberRange(min=5)
self.assertEqual(onlymin(self.form, DummyField(500)), None)
self.assertRaises(ValidationError, onlymin, self.form, DummyField(4))
onlymax = NumberRange(max=50)
self.assertEqual(onlymax(self.form, DummyField(30)), None)
self.assertRaises(ValidationError, onlymax, self.form, DummyField(75))
def test_lazy_proxy(self):
"""Tests that the validators support lazy translation strings for messages."""
class ReallyLazyProxy(object):
def __unicode__(self):
raise Exception('Translator function called during form declaration: it should be called at response time.')
__str__ = __unicode__
message = ReallyLazyProxy()
self.assertRaises(Exception, str, message)
self.assertRaises(Exception, text_type, message)
self.assertTrue(equal_to('fieldname', message=message))
self.assertTrue(length(min=1, message=message))
self.assertTrue(NumberRange(1, 5, message=message))
self.assertTrue(required(message=message))
self.assertTrue(regexp('.+', message=message))
self.assertTrue(email(message=message))
self.assertTrue(ip_address(message=message))
self.assertTrue(url(message=message))
def test_any_of(self):
self.assertEqual(AnyOf(['a', 'b', 'c'])(self.form, DummyField('b')), None)
self.assertRaises(ValueError, AnyOf(['a', 'b', 'c']), self.form, DummyField(None))
# Anyof in 1.0.1 failed on numbers for formatting the error with a TypeError
check_num = AnyOf([1, 2, 3])
self.assertEqual(check_num(self.form, DummyField(2)), None)
self.assertRaises(ValueError, check_num, self.form, DummyField(4))
# Test values_formatter
formatter = lambda values: '::'.join(text_type(x) for x in reversed(values))
checker = AnyOf([7, 8, 9], message='test %(values)s', values_formatter=formatter)
self.assertEqual(grab_error_message(checker, self.form, DummyField(4)), 'test 9::8::7')
def test_none_of(self):
self.assertEqual(NoneOf(['a', 'b', 'c'])(self.form, DummyField('d')), None)
self.assertRaises(ValueError, NoneOf(['a', 'b', 'c']), self.form, DummyField('a'))
| {
"content_hash": "060c3b32928971220add46eba523ce88",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 124,
"avg_line_length": 53.35245901639344,
"alnum_prop": 0.6468735596865878,
"repo_name": "jmagnusson/wtforms",
"id": "216705037138db45b8ebc78e2eb5167947916a88",
"size": "13083",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/validators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307594"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('imagr', '0006_auto_20141111_1324'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_created',
field=models.DateTimeField(default=datetime.datetime(2014, 11, 11, 23, 5, 43, 659018, tzinfo=utc), verbose_name=b'date created'),
preserve_default=True,
),
]
| {
"content_hash": "067f9e72854dc996f04d7ee6863e7b54",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 141,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6363636363636364,
"repo_name": "miracode/django-imagr",
"id": "6c1fb2c534bbd68cc4b40a24b99e805db11a4515",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagr_site/imagr/migrations/0007_auto_20141111_1505.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59325"
}
],
"symlink_target": ""
} |
import collections
import ctypes
from unittest import mock
import ddt
import six
from os_win import _utils
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils.storage.initiator import iscsi_utils
from os_win.utils.winapi import constants as w_const
from os_win.utils.winapi.errmsg import iscsierr
from os_win.utils.winapi.libs import iscsidsc as iscsi_struct
@ddt.ddt
class ISCSIInitiatorUtilsTestCase(test_base.OsWinBaseTestCase):
"""Unit tests for the Hyper-V ISCSIInitiatorUtils class."""
_autospec_classes = [
iscsi_utils.win32utils.Win32Utils,
iscsi_utils.diskutils.DiskUtils,
]
def setUp(self):
super(ISCSIInitiatorUtilsTestCase, self).setUp()
self._initiator = iscsi_utils.ISCSIInitiatorUtils()
self._diskutils = self._initiator._diskutils
self._iscsidsc = mock.patch.object(
iscsi_utils, 'iscsidsc', create=True).start()
self._run_mocker = mock.patch.object(self._initiator,
'_run_and_check_output')
self._mock_run = self._run_mocker.start()
iscsi_utils.portal_map = collections.defaultdict(set)
def _mock_ctypes(self):
self._ctypes = mock.Mock()
# This is used in order to easily make assertions on the variables
# passed by reference.
self._ctypes.byref = lambda x: (x, "byref")
mock.patch.object(iscsi_utils, 'ctypes', self._ctypes).start()
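        # Added note: with ctypes patched this way, byref(arg) inside the
        # code under test evaluates to (arg, "byref") rather than an opaque
        # CArgObject, so assertions such as
        #   self._mock_run.assert_called_once_with(func, self._ctypes.byref(x))
        # can compare by-reference arguments by value.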
def _get_fake_iscsi_utils_getter_func(self, func_side_effect,
decorator_args,
returned_element_count=None,
required_buff_sz=None):
@iscsi_utils.ensure_buff_and_retrieve_items(**decorator_args)
def fake_func(inst, buff=None, buff_size=None,
element_count=None, *args, **kwargs):
raised_exc = None
try:
# Those arguments will always be ULONGs, as requested
# by the iscsidsc functions.
self.assertIsInstance(buff_size, ctypes.c_ulong)
self.assertIsInstance(element_count, ctypes.c_ulong)
func_side_effect(buff=buff, buff_size_val=buff_size.value,
element_count_val=element_count.value,
*args, **kwargs)
except Exception as ex:
raised_exc = ex
if returned_element_count:
element_count.value = returned_element_count
if required_buff_sz:
buff_size.value = required_buff_sz
if raised_exc:
raise raised_exc
return mock.sentinel.ret_val
return fake_func
@mock.patch.object(iscsi_utils, '_get_items_from_buff')
def _test_ensure_buff_decorator(self, mock_get_items,
required_buff_sz=None,
returned_element_count=None,
parse_output=False):
insufficient_buff_exc = exceptions.Win32Exception(
message='fake_err_msg',
error_code=w_const.ERROR_INSUFFICIENT_BUFFER)
func_requests_buff_sz = required_buff_sz is not None
struct_type = ctypes.c_uint
decorator_args = dict(struct_type=struct_type,
parse_output=parse_output,
func_requests_buff_sz=func_requests_buff_sz)
func_side_effect = mock.Mock(side_effect=(insufficient_buff_exc, None))
fake_func = self._get_fake_iscsi_utils_getter_func(
returned_element_count=returned_element_count,
required_buff_sz=required_buff_sz,
func_side_effect=func_side_effect,
decorator_args=decorator_args)
ret_val = fake_func(self._initiator, fake_arg=mock.sentinel.arg)
if parse_output:
self.assertEqual(mock_get_items.return_value, ret_val)
else:
self.assertEqual(mock.sentinel.ret_val, ret_val)
# We expect our decorated method to be called exactly two times.
first_call_args_dict = func_side_effect.call_args_list[0][1]
self.assertIsInstance(first_call_args_dict['buff'],
ctypes.POINTER(struct_type))
self.assertEqual(first_call_args_dict['buff_size_val'], 0)
self.assertEqual(first_call_args_dict['element_count_val'], 0)
second_call_args_dict = func_side_effect.call_args_list[1][1]
self.assertIsInstance(second_call_args_dict['buff'],
ctypes.POINTER(struct_type))
self.assertEqual(second_call_args_dict['buff_size_val'],
required_buff_sz or 0)
self.assertEqual(second_call_args_dict['element_count_val'],
returned_element_count or 0)
def test_ensure_buff_func_requests_buff_sz(self):
self._test_ensure_buff_decorator(required_buff_sz=10,
parse_output=True)
def test_ensure_buff_func_requests_el_count(self):
self._test_ensure_buff_decorator(returned_element_count=5)
def test_ensure_buff_func_unexpected_exception(self):
fake_exc = exceptions.Win32Exception(message='fake_message',
error_code=1)
func_side_effect = mock.Mock(side_effect=fake_exc)
fake_func = self._get_fake_iscsi_utils_getter_func(
func_side_effect=func_side_effect,
decorator_args={'struct_type': ctypes.c_ubyte})
self.assertRaises(exceptions.Win32Exception, fake_func,
self._initiator)
def test_get_items_from_buff(self):
fake_buff_contents = 'fake_buff_contents'
fake_buff = (ctypes.c_wchar * len(fake_buff_contents))()
fake_buff.value = fake_buff_contents
fake_buff = ctypes.cast(fake_buff, ctypes.POINTER(ctypes.c_ubyte))
result = iscsi_utils._get_items_from_buff(fake_buff, ctypes.c_wchar,
len(fake_buff_contents))
self.assertEqual(fake_buff_contents, result.value)
def test_run_and_check_output(self):
self._run_mocker.stop()
self._initiator._win32utils = mock.Mock()
mock_win32utils_run_and_check_output = (
self._initiator._win32utils.run_and_check_output)
self._initiator._run_and_check_output(mock.sentinel.func,
mock.sentinel.arg,
fake_kwarg=mock.sentinel.kwarg)
mock_win32utils_run_and_check_output.assert_called_once_with(
mock.sentinel.func,
mock.sentinel.arg,
fake_kwarg=mock.sentinel.kwarg,
error_msg_src=iscsierr.err_msg_dict,
failure_exc=exceptions.ISCSIInitiatorAPIException)
def test_get_iscsi_persistent_logins(self):
self._mock_ctypes()
_get_iscsi_persistent_logins = _utils.get_wrapped_function(
self._initiator._get_iscsi_persistent_logins)
_get_iscsi_persistent_logins(
self._initiator,
buff=mock.sentinel.buff,
buff_size=mock.sentinel.buff_size,
element_count=mock.sentinel.element_count)
self._mock_run.assert_called_once_with(
self._iscsidsc.ReportIScsiPersistentLoginsW,
self._ctypes.byref(mock.sentinel.element_count),
mock.sentinel.buff,
self._ctypes.byref(mock.sentinel.buff_size))
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_parse_string_list')
def test_get_targets(self, mock_parse_string_list):
self._mock_ctypes()
get_targets = _utils.get_wrapped_function(
self._initiator.get_targets)
mock_el_count = mock.Mock(value=mock.sentinel.element_count)
resulted_target_list = get_targets(
self._initiator,
forced_update=mock.sentinel.forced_update,
element_count=mock_el_count,
buff=mock.sentinel.buff)
self.assertEqual(mock_parse_string_list.return_value,
resulted_target_list)
self._mock_run.assert_called_once_with(
self._iscsidsc.ReportIScsiTargetsW,
mock.sentinel.forced_update,
self._ctypes.byref(mock_el_count),
mock.sentinel.buff)
mock_parse_string_list.assert_called_once_with(
mock.sentinel.buff, mock.sentinel.element_count)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_parse_string_list')
def test_get_initiators(self, mock_parse_string_list):
self._mock_ctypes()
get_initiators = _utils.get_wrapped_function(
self._initiator.get_iscsi_initiators)
mock_el_count = mock.Mock(value=mock.sentinel.element_count)
resulted_initator_list = get_initiators(
self._initiator,
element_count=mock_el_count,
buff=mock.sentinel.buff)
self.assertEqual(mock_parse_string_list.return_value,
resulted_initator_list)
self._mock_run.assert_called_once_with(
self._iscsidsc.ReportIScsiInitiatorListW,
self._ctypes.byref(mock_el_count),
mock.sentinel.buff)
mock_parse_string_list.assert_called_once_with(
mock.sentinel.buff, mock.sentinel.element_count)
def test_parse_string_list(self):
self._mock_ctypes()
fake_buff = 'fake\x00buff\x00\x00'
self._ctypes.cast.return_value = fake_buff
str_list = self._initiator._parse_string_list(fake_buff,
len(fake_buff))
self.assertEqual(['fake', 'buff'], str_list)
self._ctypes.cast.assert_called_once_with(
fake_buff, self._ctypes.POINTER.return_value)
self._ctypes.POINTER.assert_called_once_with(self._ctypes.c_wchar)
def test_get_iscsi_initiator(self):
self._mock_ctypes()
self._ctypes.c_wchar = mock.MagicMock()
fake_buff = (self._ctypes.c_wchar * (
w_const.MAX_ISCSI_NAME_LEN + 1))()
fake_buff.value = mock.sentinel.buff_value
resulted_iscsi_initiator = self._initiator.get_iscsi_initiator()
self._mock_run.assert_called_once_with(
self._iscsidsc.GetIScsiInitiatorNodeNameW,
fake_buff)
self.assertEqual(mock.sentinel.buff_value,
resulted_iscsi_initiator)
@mock.patch('socket.getfqdn')
def test_get_iscsi_initiator_exception(self, mock_get_fqdn):
fake_fqdn = 'fakehost.FAKE-DOMAIN.com'
fake_exc = exceptions.ISCSIInitiatorAPIException(
message='fake_message',
error_code=1,
func_name='fake_func')
self._mock_run.side_effect = fake_exc
mock_get_fqdn.return_value = fake_fqdn
resulted_iqn = self._initiator.get_iscsi_initiator()
expected_iqn = "%s:%s" % (self._initiator._MS_IQN_PREFIX,
fake_fqdn.lower())
self.assertEqual(expected_iqn, resulted_iqn)
@mock.patch.object(ctypes, 'byref')
@mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_CONNECTION_ID')
@mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_SESSION_ID')
def test_login_iscsi_target(self, mock_cls_ISCSI_UNIQUE_SESSION_ID,
mock_cls_ISCSI_UNIQUE_CONNECTION_ID,
mock_byref):
fake_target_name = 'fake_target_name'
resulted_session_id, resulted_conection_id = (
self._initiator._login_iscsi_target(fake_target_name))
args_list = self._mock_run.call_args_list[0][0]
self.assertIsInstance(args_list[1], ctypes.c_wchar_p)
self.assertEqual(fake_target_name, args_list[1].value)
self.assertIsInstance(args_list[4], ctypes.c_ulong)
self.assertEqual(
ctypes.c_ulong(w_const.ISCSI_ANY_INITIATOR_PORT).value,
args_list[4].value)
self.assertIsInstance(args_list[6], ctypes.c_ulonglong)
self.assertEqual(0, args_list[6].value)
self.assertIsInstance(args_list[9], ctypes.c_ulong)
self.assertEqual(0, args_list[9].value)
mock_byref.assert_has_calls([
mock.call(mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value),
mock.call(mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value)])
self.assertEqual(
mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value,
resulted_session_id)
self.assertEqual(
mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value,
resulted_conection_id)
def test_get_iscsi_sessions(self):
self._mock_ctypes()
_get_iscsi_sessions = _utils.get_wrapped_function(
self._initiator._get_iscsi_sessions)
_get_iscsi_sessions(
self._initiator,
buff=mock.sentinel.buff,
buff_size=mock.sentinel.buff_size,
element_count=mock.sentinel.element_count)
self._mock_run.assert_called_once_with(
self._iscsidsc.GetIScsiSessionListW,
self._ctypes.byref(mock.sentinel.buff_size),
self._ctypes.byref(mock.sentinel.element_count),
mock.sentinel.buff)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_sessions')
def test_get_iscsi_target_sessions(self, mock_get_iscsi_sessions,
target_sessions_found=True):
fake_session = mock.Mock(TargetNodeName="FAKE_TARGET_NAME",
ConnectionCount=1)
fake_disconn_session = mock.Mock(
TargetNodeName="fake_target_name",
ConnectionCount=0)
other_session = mock.Mock(TargetNodeName="other_target_name",
ConnectionCount=1)
sessions = [fake_session, fake_disconn_session, other_session]
mock_get_iscsi_sessions.return_value = sessions
resulted_tgt_sessions = self._initiator._get_iscsi_target_sessions(
"fake_target_name")
self.assertEqual([fake_session], resulted_tgt_sessions)
def test_get_iscsi_session_devices(self):
self._mock_ctypes()
_get_iscsi_session_devices = _utils.get_wrapped_function(
self._initiator._get_iscsi_session_devices)
_get_iscsi_session_devices(
self._initiator,
mock.sentinel.session_id,
buff=mock.sentinel.buff,
element_count=mock.sentinel.element_count)
self._mock_run.assert_called_once_with(
self._iscsidsc.GetDevicesForIScsiSessionW,
self._ctypes.byref(mock.sentinel.session_id),
self._ctypes.byref(mock.sentinel.element_count),
mock.sentinel.buff)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_session_devices')
def test_get_iscsi_session_luns(self, mock_get_iscsi_session_devices):
fake_device = mock.Mock()
fake_device.StorageDeviceNumber.DeviceType = w_const.FILE_DEVICE_DISK
mock_get_iscsi_session_devices.return_value = [fake_device,
mock.Mock()]
resulted_luns = self._initiator._get_iscsi_session_disk_luns(
mock.sentinel.session_id)
expected_luns = [fake_device.ScsiAddress.Lun]
mock_get_iscsi_session_devices.assert_called_once_with(
mock.sentinel.session_id)
self.assertEqual(expected_luns, resulted_luns)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_session_devices')
def test_get_iscsi_device_from_session(self,
mock_get_iscsi_session_devices):
fake_device = mock.Mock()
fake_device.ScsiAddress.Lun = mock.sentinel.target_lun
mock_get_iscsi_session_devices.return_value = [mock.Mock(),
fake_device]
resulted_device = self._initiator._get_iscsi_device_from_session(
mock.sentinel.session_id,
mock.sentinel.target_lun)
mock_get_iscsi_session_devices.assert_called_once_with(
mock.sentinel.session_id)
self.assertEqual(fake_device, resulted_device)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'get_device_number_and_path')
def test_get_device_number_for_target(self, mock_get_dev_num_and_path):
dev_num = self._initiator.get_device_number_for_target(
mock.sentinel.target_name, mock.sentinel.lun,
mock.sentinel.fail_if_not_found)
mock_get_dev_num_and_path.assert_called_once_with(
mock.sentinel.target_name, mock.sentinel.lun,
mock.sentinel.fail_if_not_found)
self.assertEqual(mock_get_dev_num_and_path.return_value[0], dev_num)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'ensure_lun_available')
def test_get_device_number_and_path(self, mock_ensure_lun_available):
mock_ensure_lun_available.return_value = (mock.sentinel.dev_num,
mock.sentinel.dev_path)
dev_num, dev_path = self._initiator.get_device_number_and_path(
mock.sentinel.target_name, mock.sentinel.lun,
retry_attempts=mock.sentinel.retry_attempts,
retry_interval=mock.sentinel.retry_interval,
rescan_disks=mock.sentinel.rescan_disks,
ensure_mpio_claimed=mock.sentinel.ensure_mpio_claimed)
mock_ensure_lun_available.assert_called_once_with(
mock.sentinel.target_name, mock.sentinel.lun,
rescan_attempts=mock.sentinel.retry_attempts,
retry_interval=mock.sentinel.retry_interval,
rescan_disks=mock.sentinel.rescan_disks,
ensure_mpio_claimed=mock.sentinel.ensure_mpio_claimed)
self.assertEqual(mock.sentinel.dev_num, dev_num)
self.assertEqual(mock.sentinel.dev_path, dev_path)
@ddt.data(True, False)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'ensure_lun_available')
def test_get_device_number_and_path_exc(self, fail_if_not_found,
mock_ensure_lun_available):
raised_exc = exceptions.ISCSILunNotAvailable
mock_ensure_lun_available.side_effect = raised_exc(
target_iqn=mock.sentinel.target_iqn,
target_lun=mock.sentinel.target_lun)
if fail_if_not_found:
self.assertRaises(raised_exc,
self._initiator.get_device_number_and_path,
mock.sentinel.target_name,
mock.sentinel.lun,
fail_if_not_found)
else:
dev_num, dev_path = self._initiator.get_device_number_and_path(
mock.sentinel.target_name,
mock.sentinel.lun,
fail_if_not_found)
self.assertIsNone(dev_num)
self.assertIsNone(dev_path)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_target_sessions')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_session_disk_luns')
def test_get_target_luns(self, mock_get_iscsi_session_disk_luns,
mock_get_iscsi_target_sessions):
fake_session = mock.Mock()
mock_get_iscsi_target_sessions.return_value = [fake_session]
retrieved_luns = [mock.sentinel.lun_0]
mock_get_iscsi_session_disk_luns.return_value = retrieved_luns
resulted_luns = self._initiator.get_target_luns(
mock.sentinel.target_name)
mock_get_iscsi_target_sessions.assert_called_once_with(
mock.sentinel.target_name)
mock_get_iscsi_session_disk_luns.assert_called_once_with(
fake_session.SessionId)
self.assertEqual(retrieved_luns, resulted_luns)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'get_target_luns')
def test_get_target_lun_count(self, mock_get_target_luns):
target_luns = [mock.sentinel.lun0, mock.sentinel.lun1]
mock_get_target_luns.return_value = target_luns
lun_count = self._initiator.get_target_lun_count(
mock.sentinel.target_name)
self.assertEqual(len(target_luns), lun_count)
mock_get_target_luns.assert_called_once_with(
mock.sentinel.target_name)
def test_logout_iscsi_target(self):
self._mock_ctypes()
self._initiator._logout_iscsi_target(mock.sentinel.session_id)
self._mock_run.assert_called_once_with(
self._iscsidsc.LogoutIScsiTarget,
self._ctypes.byref(mock.sentinel.session_id))
def test_add_static_target(self):
self._mock_ctypes()
is_persistent = True
self._initiator._add_static_target(mock.sentinel.target_name,
is_persistent=is_persistent)
self._mock_run.assert_called_once_with(
self._iscsidsc.AddIScsiStaticTargetW,
self._ctypes.c_wchar_p(mock.sentinel.target_name),
None, 0, is_persistent, None, None, None)
def test_remove_static_target(self):
self._mock_ctypes()
self._initiator._remove_static_target(mock.sentinel.target_name)
expected_ignored_err_codes = [w_const.ISDSC_TARGET_NOT_FOUND]
self._mock_run.assert_called_once_with(
self._iscsidsc.RemoveIScsiStaticTargetW,
self._ctypes.c_wchar_p(mock.sentinel.target_name),
ignored_error_codes=expected_ignored_err_codes)
def test_get_login_opts(self):
fake_username = 'fake_chap_username'
fake_password = 'fake_chap_secret'
auth_type = constants.ISCSI_CHAP_AUTH_TYPE
login_flags = w_const.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED
login_opts = self._initiator._get_login_opts(
auth_username=fake_username,
auth_password=fake_password,
auth_type=auth_type,
login_flags=login_flags)
self.assertEqual(len(fake_username), login_opts.UsernameLength)
self.assertEqual(len(fake_password), login_opts.PasswordLength)
username_struct_contents = ctypes.cast(
login_opts.Username,
ctypes.POINTER(ctypes.c_char * len(fake_username))).contents.value
pwd_struct_contents = ctypes.cast(
login_opts.Password,
ctypes.POINTER(ctypes.c_char * len(fake_password))).contents.value
self.assertEqual(six.b(fake_username), username_struct_contents)
self.assertEqual(six.b(fake_password), pwd_struct_contents)
expected_info_bitmap = (w_const.ISCSI_LOGIN_OPTIONS_USERNAME |
w_const.ISCSI_LOGIN_OPTIONS_PASSWORD |
w_const.ISCSI_LOGIN_OPTIONS_AUTH_TYPE)
self.assertEqual(expected_info_bitmap,
login_opts.InformationSpecified)
self.assertEqual(login_flags,
login_opts.LoginFlags)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_session_devices')
def test_session_on_path_exists(self, mock_get_iscsi_session_devices):
mock_device = mock.Mock(InitiatorName=mock.sentinel.initiator_name)
mock_get_iscsi_session_devices.return_value = [mock_device]
fake_connection = mock.Mock(TargetAddress=mock.sentinel.portal_addr,
TargetSocket=mock.sentinel.portal_port)
fake_connections = [mock.Mock(), fake_connection]
fake_session = mock.Mock(ConnectionCount=len(fake_connections),
Connections=fake_connections)
fake_sessions = [mock.Mock(Connections=[], ConnectionCount=0),
fake_session]
session_on_path_exists = self._initiator._session_on_path_exists(
fake_sessions, mock.sentinel.portal_addr,
mock.sentinel.portal_port,
mock.sentinel.initiator_name)
self.assertTrue(session_on_path_exists)
mock_get_iscsi_session_devices.assert_has_calls(
[mock.call(session.SessionId) for session in fake_sessions])
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_target_sessions')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_session_on_path_exists')
def _test_new_session_required(self, mock_session_on_path_exists,
mock_get_iscsi_target_sessions,
sessions=None,
mpio_enabled=False,
session_on_path_exists=False):
mock_get_iscsi_target_sessions.return_value = sessions
mock_session_on_path_exists.return_value = session_on_path_exists
expected_result = (not sessions or
(mpio_enabled and not session_on_path_exists))
result = self._initiator._new_session_required(
mock.sentinel.target_iqn,
mock.sentinel.portal_addr,
mock.sentinel.portal_port,
mock.sentinel.initiator_name,
mpio_enabled)
self.assertEqual(expected_result, result)
if sessions and mpio_enabled:
mock_session_on_path_exists.assert_called_once_with(
sessions,
mock.sentinel.portal_addr,
mock.sentinel.portal_port,
mock.sentinel.initiator_name)
def test_new_session_required_no_sessions(self):
self._test_new_session_required()
def test_new_session_required_existing_sessions_no_mpio(self):
self._test_new_session_required(sessions=mock.sentinel.sessions)
def test_new_session_required_existing_sessions_mpio_enabled(self):
self._test_new_session_required(sessions=mock.sentinel.sessions,
mpio_enabled=True)
def test_new_session_required_session_on_path_exists(self):
self._test_new_session_required(sessions=mock.sentinel.sessions,
mpio_enabled=True,
session_on_path_exists=True)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_login_opts')
@mock.patch.object(iscsi_struct, 'ISCSI_TARGET_PORTAL')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_new_session_required')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, 'get_targets')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '_login_iscsi_target')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'ensure_lun_available')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_add_static_target')
def _test_login_storage_target(self, mock_add_static_target,
mock_ensure_lun_available,
mock_login_iscsi_target,
mock_get_targets,
mock_session_required,
mock_cls_ISCSI_TARGET_PORTAL,
mock_get_login_opts,
mpio_enabled=False,
login_required=True):
fake_portal_addr = '127.0.0.1'
fake_portal_port = 3260
fake_target_portal = '%s:%s' % (fake_portal_addr, fake_portal_port)
fake_portal = mock_cls_ISCSI_TARGET_PORTAL.return_value
fake_login_opts = mock_get_login_opts.return_value
mock_get_targets.return_value = []
mock_login_iscsi_target.return_value = (mock.sentinel.session_id,
mock.sentinel.conn_id)
mock_session_required.return_value = login_required
self._initiator.login_storage_target(
mock.sentinel.target_lun,
mock.sentinel.target_iqn,
fake_target_portal,
auth_username=mock.sentinel.auth_username,
auth_password=mock.sentinel.auth_password,
auth_type=mock.sentinel.auth_type,
mpio_enabled=mpio_enabled,
rescan_attempts=mock.sentinel.rescan_attempts)
mock_get_targets.assert_called_once_with()
mock_add_static_target.assert_called_once_with(
mock.sentinel.target_iqn)
if login_required:
expected_login_flags = (
w_const.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED
if mpio_enabled else 0)
mock_get_login_opts.assert_called_once_with(
mock.sentinel.auth_username,
mock.sentinel.auth_password,
mock.sentinel.auth_type,
expected_login_flags)
mock_cls_ISCSI_TARGET_PORTAL.assert_called_once_with(
Address=fake_portal_addr,
Socket=fake_portal_port)
mock_login_iscsi_target.assert_has_calls([
mock.call(mock.sentinel.target_iqn,
fake_portal,
fake_login_opts,
is_persistent=True),
mock.call(mock.sentinel.target_iqn,
fake_portal,
fake_login_opts,
is_persistent=False)])
else:
self.assertFalse(mock_login_iscsi_target.called)
mock_ensure_lun_available.assert_called_once_with(
mock.sentinel.target_iqn,
mock.sentinel.target_lun,
mock.sentinel.rescan_attempts)
def test_login_storage_target_path_exists(self):
self._test_login_storage_target(login_required=False)
def test_login_new_storage_target_no_mpio(self):
self._test_login_storage_target()
def test_login_storage_target_new_path_using_mpio(self):
self._test_login_storage_target(mpio_enabled=True)
@ddt.data(dict(rescan_disks=True),
dict(retry_interval=mock.sentinel.retry_interval))
@ddt.unpack
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_device_from_session')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_target_sessions')
@mock.patch('time.sleep')
def test_ensure_lun_available(self, mock_sleep,
mock_get_iscsi_target_sessions,
mock_get_iscsi_device_from_session,
rescan_disks=False, retry_interval=0):
retry_count = 6
mock_get_iscsi_target_sessions.return_value = [
mock.Mock(SessionId=mock.sentinel.session_id)]
fake_exc = exceptions.ISCSIInitiatorAPIException(
message='fake_message',
error_code=1,
func_name='fake_func')
dev_num_side_eff = [None, -1] + [mock.sentinel.dev_num] * 3
dev_path_side_eff = ([mock.sentinel.dev_path] * 2 +
[None] + [mock.sentinel.dev_path] * 2)
fake_device = mock.Mock()
type(fake_device.StorageDeviceNumber).DeviceNumber = (
mock.PropertyMock(side_effect=dev_num_side_eff))
type(fake_device).LegacyName = (
mock.PropertyMock(side_effect=dev_path_side_eff))
mock_get_dev_side_eff = [None, fake_exc] + [fake_device] * 5
mock_get_iscsi_device_from_session.side_effect = mock_get_dev_side_eff
self._diskutils.is_mpio_disk.side_effect = [False, True]
dev_num, dev_path = self._initiator.ensure_lun_available(
mock.sentinel.target_iqn,
mock.sentinel.target_lun,
rescan_attempts=retry_count,
retry_interval=retry_interval,
rescan_disks=rescan_disks,
ensure_mpio_claimed=True)
self.assertEqual(mock.sentinel.dev_num, dev_num)
self.assertEqual(mock.sentinel.dev_path, dev_path)
mock_get_iscsi_target_sessions.assert_has_calls(
[mock.call(mock.sentinel.target_iqn)] * (retry_count + 1))
mock_get_iscsi_device_from_session.assert_has_calls(
[mock.call(mock.sentinel.session_id,
mock.sentinel.target_lun)] * retry_count)
self._diskutils.is_mpio_disk.assert_has_calls(
[mock.call(mock.sentinel.dev_num)] * 2)
expected_rescan_count = retry_count if rescan_disks else 0
self.assertEqual(
expected_rescan_count,
self._diskutils.rescan_disks.call_count)
if retry_interval:
mock_sleep.assert_has_calls(
[mock.call(retry_interval)] * retry_count)
else:
self.assertFalse(mock_sleep.called)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_target_sessions')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_logout_iscsi_target')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_remove_target_persistent_logins')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_remove_static_target')
def test_logout_storage_target(self, mock_remove_static_target,
mock_remove_target_persistent_logins,
mock_logout_iscsi_target,
mock_get_iscsi_target_sessions):
fake_session = mock.Mock(SessionId=mock.sentinel.session_id)
mock_get_iscsi_target_sessions.return_value = [fake_session]
self._initiator.logout_storage_target(mock.sentinel.target_iqn)
mock_get_iscsi_target_sessions.assert_called_once_with(
mock.sentinel.target_iqn, connected_only=False)
mock_logout_iscsi_target.assert_called_once_with(
mock.sentinel.session_id)
mock_remove_target_persistent_logins.assert_called_once_with(
mock.sentinel.target_iqn)
mock_remove_static_target.assert_called_once_with(
mock.sentinel.target_iqn)
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_remove_persistent_login')
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
'_get_iscsi_persistent_logins')
def test_remove_target_persistent_logins(self,
mock_get_iscsi_persistent_logins,
mock_remove_persistent_login):
fake_persistent_login = mock.Mock(TargetName=mock.sentinel.target_iqn)
mock_get_iscsi_persistent_logins.return_value = [fake_persistent_login]
self._initiator._remove_target_persistent_logins(
mock.sentinel.target_iqn)
mock_remove_persistent_login.assert_called_once_with(
fake_persistent_login)
mock_get_iscsi_persistent_logins.assert_called_once_with()
@mock.patch.object(ctypes, 'byref')
def test_remove_persistent_login(self, mock_byref):
fake_persistent_login = mock.Mock()
fake_persistent_login.InitiatorInstance = 'fake_initiator_instance'
fake_persistent_login.TargetName = 'fake_target_name'
self._initiator._remove_persistent_login(fake_persistent_login)
args_list = self._mock_run.call_args_list[0][0]
self.assertIsInstance(args_list[1], ctypes.c_wchar_p)
self.assertEqual(fake_persistent_login.InitiatorInstance,
args_list[1].value)
self.assertIsInstance(args_list[3], ctypes.c_wchar_p)
self.assertEqual(fake_persistent_login.TargetName,
args_list[3].value)
mock_byref.assert_called_once_with(fake_persistent_login.TargetPortal)
| {
"content_hash": "c6fe025f02fd11955119dad9551699bc",
"timestamp": "",
"source": "github",
"line_count": 833,
"max_line_length": 79,
"avg_line_length": 43.6062424969988,
"alnum_prop": 0.5985574275960798,
"repo_name": "openstack/os-win",
"id": "31c25146797a8294bb4871db29737ea690af918b",
"size": "36965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_win/tests/unit/utils/storage/initiator/test_iscsi_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1114520"
}
],
"symlink_target": ""
} |
"""
Support for Xiaomi Mi Temp BLE environmental sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mitemp_bt/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_FORCE_UPDATE, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_MAC
)
REQUIREMENTS = ['mitemp_bt==0.0.1']
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = 'adapter'
CONF_CACHE = 'cache_value'
CONF_MEDIAN = 'median'
CONF_RETRIES = 'retries'
CONF_TIMEOUT = 'timeout'
DEFAULT_ADAPTER = 'hci0'
DEFAULT_UPDATE_INTERVAL = 300
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = 'MiTemp BT'
DEFAULT_RETRIES = 2
DEFAULT_TIMEOUT = 10
# Sensor types are defined like: Name, units
SENSOR_TYPES = {
'temperature': ['Temperature', '°C'],
'humidity': ['Humidity', '%'],
'battery': ['Battery', '%'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_RETRIES, default=DEFAULT_RETRIES): cv.positive_int,
vol.Optional(CONF_CACHE, default=DEFAULT_UPDATE_INTERVAL): cv.positive_int,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
})
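# Illustrative configuration sketch (added, not part of the original file;
# the MAC address below is a placeholder):
#
# sensor:
#   - platform: mitemp_bt
#     mac: 'xx:xx:xx:xx:xx:xx'
#     monitored_conditions:
#       - temperature
#       - humidity
#       - battery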
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MiTempBt sensor."""
from mitemp_bt import mitemp_bt_poller
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
from btlewrap import BluepyBackend
backend = BluepyBackend
except ImportError:
from btlewrap import GatttoolBackend
backend = GatttoolBackend
_LOGGER.debug('MiTempBt is using %s backend.', backend.__name__)
cache = config.get(CONF_CACHE)
poller = mitemp_bt_poller.MiTempBtPoller(
config.get(CONF_MAC), cache_timeout=cache,
adapter=config.get(CONF_ADAPTER), backend=backend)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
poller.ble_timeout = config.get(CONF_TIMEOUT)
poller.retries = config.get(CONF_RETRIES)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = SENSOR_TYPES[parameter][1]
prefix = config.get(CONF_NAME)
if prefix:
name = "{} {}".format(prefix, name)
devs.append(MiTempBtSensor(
poller, parameter, name, unit, force_update, median))
add_entities(devs)
class MiTempBtSensor(Entity):
"""Implementing the MiTempBt sensor."""
def __init__(self, poller, parameter, name, unit, force_update, median):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._unit = unit
self._name = name
self._state = None
self.data = []
self._force_update = force_update
# Median is used to filter out outliers. median of 3 will filter
# single outliers, while median of 5 will filter double outliers
# Use median_count = 1 if no filtering is required.
self.median_count = median
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
This uses a rolling median over 3 values to filter out outliers.
"""
from btlewrap.base import BluetoothBackendException
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except IOError as ioerr:
_LOGGER.warning("Polling error %s", ioerr)
return
except BluetoothBackendException as bterror:
_LOGGER.warning("Polling error %s", bterror)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
self.data.append(data)
else:
_LOGGER.warning("Did not receive any data from Mi Temp sensor %s",
self.name)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
else:
_LOGGER.debug("Not yet enough data for median calculation")
| {
"content_hash": "374751023a6e56991902fd29e0166789",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 79,
"avg_line_length": 32.45348837209303,
"alnum_prop": 0.6377642422070943,
"repo_name": "tinloaf/home-assistant",
"id": "15e225fd2c0eda9948ac71c65172246b43ebdc3d",
"size": "5583",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/mitemp_bt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import webnotes
import webnotes.widgets.query_builder
from webnotes.utils import cstr
from startup.query_handlers import standard_queries
# this is called when a new doctype is set up for search - to set the filters
@webnotes.whitelist()
def getsearchfields():
sf = webnotes.conn.sql("""\
SELECT value FROM `tabProperty Setter`
WHERE doc_type=%s AND property='search_fields'""", \
(webnotes.form_dict.get("doctype")))
if not (sf and len(sf)>0 and sf[0][0]):
sf = webnotes.conn.sql("select search_fields from tabDocType where name=%s", webnotes.form_dict.get("doctype"))
sf = sf and sf[0][0] or ''
sf = [s.strip() for s in sf.split(',')]
if sf and sf[0]:
res = webnotes.conn.sql("select fieldname, label, fieldtype, options from tabDocField where parent='%s' and fieldname in (%s)" % (webnotes.form_dict.get("doctype","_NA"), '"'+'","'.join(sf)+'"'))
else:
res = []
res = [[c or '' for c in r] for r in res]
for r in res:
if r[2]=='Select' and r[3] and r[3].startswith('link:'):
dt = r[3][5:]
ol = webnotes.conn.sql("select name from `tab%s` where docstatus!=2 order by name asc" % dt)
r[3] = '\n'.join([''] + [o[0] for o in ol])
webnotes.response['searchfields'] = [['name', 'ID', 'Data', '']] + res
# this is called by the Link Field
@webnotes.whitelist()
def search_link(dt, txt, query=None, filters=None):
search_widget(dt, txt, query, page_len=10, filters=filters)
webnotes.response['results'] = build_for_autosuggest(webnotes.response["values"])
# this is called by the search box
@webnotes.whitelist()
def search_widget(doctype, txt, query=None, searchfield="name", start=0, page_len=50, filters=None):
if isinstance(filters, basestring):
import json
filters = json.loads(filters)
if query and query.split()[0].lower()!="select":
webnotes.response["values"] = webnotes.get_method(query)(doctype, txt,
searchfield, start, page_len, filters)
elif not query and doctype in standard_queries:
search_widget(doctype, txt, standard_queries[doctype],
searchfield, start, page_len, filters)
else:
if query:
webnotes.response["values"] = webnotes.conn.sql(scrub_custom_query(query,
searchfield, txt))
else:
if filters:
webnotes.response["values"] = get_query_result(
', '.join(get_std_fields_list(doctype, searchfield)), doctype, txt,
searchfield, start, page_len, filters)
else:
query = make_query(', '.join(get_std_fields_list(doctype, searchfield)), doctype,
searchfield, txt, start, page_len)
webnotes.widgets.query_builder.runquery(query)
def make_query(fields, dt, key, txt, start, length):
doctype = webnotes.get_doctype(dt)
enabled_condition = ""
if doctype.get({"parent":dt, "fieldname":"enabled", "fieldtype":"Check"}):
enabled_condition = " AND ifnull(`tab%s`.`enabled`,0)=1" % dt
if doctype.get({"parent":dt, "fieldname":"disabled", "fieldtype":"Check"}):
enabled_condition = " AND ifnull(`tab%s`.`disabled`,0)!=1" % dt
query = """select %(fields)s
FROM `tab%(dt)s`
WHERE `tab%(dt)s`.`%(key)s` LIKE '%(txt)s'
AND `tab%(dt)s`.docstatus != 2 %(enabled_condition)s
ORDER BY `tab%(dt)s`.`%(key)s`
ASC LIMIT %(start)s, %(len)s """ % {
'fields': fields,
'dt': dt,
'key': key,
'txt': txt + '%',
'start': start,
'len': length,
'enabled_condition': enabled_condition,
}
return query
def get_query_result(fields, dt, txt, searchfield, start, page_len, filters):
doctype = webnotes.get_doctype(dt)
enabled_condition = ""
if doctype.get({"parent":dt, "fieldname":"enabled", "fieldtype":"Check"}):
enabled_condition = " AND ifnull(`enabled`,0)=1 "
if doctype.get({"parent":dt, "fieldname":"disabled", "fieldtype":"Check"}):
enabled_condition = " AND ifnull(`disabled`,0)!=1"
filter_condition, filter_values = build_filter_conditions(filters)
args = {
'fields': fields,
'dt': dt,
'key': searchfield,
'txt': '%s',
'start': start,
'len': page_len,
'enabled_condition': enabled_condition,
'filter_condition': filter_condition
}
return webnotes.conn.sql("""select %(fields)s FROM `tab%(dt)s`
WHERE `%(key)s` LIKE %(txt)s
AND docstatus != 2 %(enabled_condition)s %(filter_condition)s
ORDER BY `%(key)s`
ASC LIMIT %(start)s, %(len)s""" % args,
tuple(["%%%s%%" % txt] + filter_values))
def build_filter_conditions(filters):
conditions, filter_values = [], []
for key in filters:
conditions.append('`' + key + '` = %s')
filter_values.append(filters[key])
conditions = conditions and " and " + " and ".join(conditions) or ""
return conditions, filter_values
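# Illustrative sketch of the helper above (hypothetical input, not taken from
# the original source): build_filter_conditions({"group": "Customer"}) returns
# (" and `group` = %s", ["Customer"]); get_query_result interpolates the
# condition string into its WHERE clause and passes the values separately so
# the database driver can escape them.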
def get_std_fields_list(dt, key):
# get additional search fields
sflist = webnotes.conn.sql("select search_fields from tabDocType where name = '%s'" % dt)
sflist = sflist and sflist[0][0] and sflist[0][0].split(',') or []
sflist = ['name'] + sflist
if not key in sflist:
sflist = sflist + [key]
return ['`tab%s`.`%s`' % (dt, f.strip()) for f in sflist]
def build_for_autosuggest(res):
results = []
for r in res:
info = ''
if len(r) > 1:
info = ', '.join([cstr(t) for t in r[1:]])
if len(info) > 50:
info = "<span title=\"%s\">%s...</span>" % (info, info[:50])
results.append({'label':r[0], 'value':r[0], 'info':info})
return results
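# Illustrative example (hypothetical row, not from the original source):
# build_for_autosuggest([["ITEM-001", "Blue Widget"]]) returns
# [{'label': 'ITEM-001', 'value': 'ITEM-001', 'info': 'Blue Widget'}];
# rows whose joined info exceeds 50 characters are wrapped in a <span>
# carrying the full text in its title attribute.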
def scrub_custom_query(query, key, txt):
if '%(key)s' in query:
query = query.replace('%(key)s', key)
if '%s' in query:
query = query.replace('%s', ((txt or '') + '%'))
	return query
| {
"content_hash": "0bb474369928d154731ee6a3a3a9fa7e",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 198,
"avg_line_length": 34.54430379746835,
"alnum_prop": 0.6504213997801392,
"repo_name": "gangadhar-kadam/mtn-wnframework",
"id": "34907a788312c993f54a3424212140f730c8d37f",
"size": "6636",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "webnotes/widgets/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2222822"
},
{
"name": "Python",
"bytes": "589708"
}
],
"symlink_target": ""
} |
"""Beam fn API log handler."""
# pytype: skip-file
# mypy: disallow-untyped-defs
import logging
import math
import queue
import sys
import threading
import time
import traceback
from typing import TYPE_CHECKING
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Union
from typing import cast
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
# This module is experimental. No backwards-compatibility guarantees.
class FnApiLogRecordHandler(logging.Handler):
"""A handler that writes log records to the fn API."""
# Maximum number of log entries in a single stream request.
_MAX_BATCH_SIZE = 1000
# Used to indicate the end of stream.
_FINISHED = Sentinel.sentinel
# Size of the queue used to buffer messages. Once full, messages will be
# dropped. If the average log size is 1KB this may use up to 10MB of memory.
_QUEUE_SIZE = 10000
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
}
def __init__(self, log_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> None
super().__init__()
self._alive = True
self._dropped_logs = 0
self._log_entry_queue = queue.Queue(
maxsize=self._QUEUE_SIZE
) # type: queue.Queue[Union[beam_fn_api_pb2.LogEntry, Sentinel]]
ch = GRPCChannelFactory.insecure_channel(log_service_descriptor.url)
# Make sure the channel is ready to avoid [BEAM-4649]
grpc.channel_ready_future(ch).result(timeout=60)
self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
self._reader = threading.Thread(
target=lambda: self._read_log_control_messages(),
name='read_log_control_messages')
self._reader.daemon = True
self._reader.start()
def connect(self):
# type: () -> Iterable
if hasattr(self, '_logging_stub'):
del self._logging_stub # type: ignore[has-type]
self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
self._log_channel)
return self._logging_stub.Logging(self._write_log_entries())
def map_log_level(self, level):
# type: (int) -> beam_fn_api_pb2.LogEntry.Severity.Enum
try:
return self.LOG_LEVEL_MAP[level]
except KeyError:
return max(
beam_level for python_level,
beam_level in self.LOG_LEVEL_MAP.items() if python_level <= level)
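  # Illustrative note (not in the original source): a custom level such as
  # logging.INFO + 5 is absent from LOG_LEVEL_MAP, so map_log_level falls
  # back to the largest mapped severity whose Python level does not exceed
  # the requested level -- here beam_fn_api_pb2.LogEntry.Severity.INFO,
  # assuming the proto enum values grow with severity.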
def emit(self, record):
# type: (logging.LogRecord) -> None
log_entry = beam_fn_api_pb2.LogEntry()
log_entry.severity = self.map_log_level(record.levelno)
log_entry.message = self.format(record)
log_entry.thread = record.threadName
log_entry.log_location = '%s:%s' % (
record.pathname or record.module, record.lineno or record.funcName)
(fraction, seconds) = math.modf(record.created)
nanoseconds = 1e9 * fraction
log_entry.timestamp.seconds = int(seconds)
log_entry.timestamp.nanos = int(nanoseconds)
if record.exc_info:
log_entry.trace = ''.join(traceback.format_exception(*record.exc_info))
instruction_id = statesampler.get_current_instruction_id()
if instruction_id:
log_entry.instruction_id = instruction_id
tracker = statesampler.get_current_tracker()
if tracker:
current_state = tracker.current_state()
if (current_state and current_state.name_context and
current_state.name_context.transform_id):
log_entry.transform_id = current_state.name_context.transform_id
try:
self._log_entry_queue.put(log_entry, block=False)
except queue.Full:
self._dropped_logs += 1
def close(self):
# type: () -> None
"""Flush out all existing log entries and unregister this handler."""
try:
self._alive = False
# Acquiring the handler lock ensures ``emit`` is not run until the lock is
# released.
self.acquire()
self._log_entry_queue.put(self._FINISHED, timeout=5)
# wait on server to close.
self._reader.join()
self.release()
# Unregister this handler.
super().close()
except Exception:
# Log rather than raising exceptions, to avoid clobbering
# underlying errors that may have caused this to close
# prematurely.
logging.error("Error closing the logging channel.", exc_info=True)
def _write_log_entries(self):
# type: () -> Iterator[beam_fn_api_pb2.LogEntry.List]
done = False
while not done:
log_entries = [self._log_entry_queue.get()]
try:
for _ in range(self._MAX_BATCH_SIZE):
log_entries.append(self._log_entry_queue.get_nowait())
except queue.Empty:
pass
if log_entries[-1] is self._FINISHED:
done = True
log_entries.pop()
if log_entries:
# typing: log_entries was initialized as List[Union[..., Sentinel]],
# but now that we've popped the sentinel out (above) we can safely cast
yield beam_fn_api_pb2.LogEntry.List(
log_entries=cast(List[beam_fn_api_pb2.LogEntry], log_entries))
def _read_log_control_messages(self):
# type: () -> None
    # Only reconnect while we are alive.
    # Some logs may be dropped in the unlikely event that the logging
    # connection is dropped (not closed) during termination while logs are
    # still waiting to be sent. The chance of reconnecting and successfully
    # transmitting them is also very low because the process is terminating,
    # so this case is intentionally not handled to avoid unnecessary code
    # complexity.
alive = True # Force at least one connection attempt.
while alive:
# Loop for reconnection.
log_control_iterator = self.connect()
if self._dropped_logs > 0:
logging.warning(
"Dropped %d logs while logging client disconnected",
self._dropped_logs)
self._dropped_logs = 0
try:
for _ in log_control_iterator:
# Loop for consuming messages from server.
# TODO(vikasrk): Handle control messages.
pass
# iterator is closed
return
except Exception as ex:
print(
"Logging client failed: {}... resetting".format(ex),
file=sys.stderr)
# Wait a bit before trying a reconnect
time.sleep(0.5) # 0.5 seconds
alive = self._alive
| {
"content_hash": "10ecc54476c1d2efca8a128f7458a45d",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 80,
"avg_line_length": 36.60621761658031,
"alnum_prop": 0.6767162066525124,
"repo_name": "robertwb/incubator-beam",
"id": "75cdcf5fb85ffb57ae9de4bde0076ec1fa4cc80b",
"size": "7850",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/worker/log_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "59582"
},
{
"name": "Dart",
"bytes": "541526"
},
{
"name": "Dockerfile",
"bytes": "48191"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "4688736"
},
{
"name": "Groovy",
"bytes": "888171"
},
{
"name": "HCL",
"bytes": "101646"
},
{
"name": "HTML",
"bytes": "164685"
},
{
"name": "Java",
"bytes": "38649211"
},
{
"name": "JavaScript",
"bytes": "105966"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "209531"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "9785295"
},
{
"name": "SCSS",
"bytes": "312814"
},
{
"name": "Sass",
"bytes": "19336"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "336583"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "181369"
}
],
"symlink_target": ""
} |
from allthepairs.all_pairs2 import all_pairs2 as all_pairs
"""
Demo of filtering capabilities
"""
# sample parameters are taken from
# http://www.stsc.hill.af.mil/consulting/sw_testing/improvement/cst.html
parameters = [ [ "Brand X", "Brand Y" ]
, [ "98", "NT", "2000", "XP"]
, [ "Internal", "Modem" ]
, [ "Salaried", "Hourly", "Part-Time", "Contr." ]
, [ 6, 10, 15, 30, 60 ]
]
def is_valid_combination( row ):
"""
Should return True if combination is valid and False otherwise.
Test row that is passed here can be incomplete.
To prevent search for unnecessary items filtering function
is executed with found subset of data to validate it.
"""
n = len(row)
if n>1:
# Brand Y does not support Windows 98
if "98" == row[1] and "Brand Y" == row[0]:
return False
# Brand X does not work with XP
if "XP" == row[1] and "Brand X" == row[0]:
return False
if n > 4:
# Contractors are billed in 30 min increments
if "Contr." == row[3] and row[4] < 30:
return False
return True
pairwise = all_pairs( parameters, filter_func = is_valid_combination )
for i, v in enumerate(pairwise):
    print("%i:\t%s" % (i, str(v)))
| {
"content_hash": "f138ff3fe97066c67ab8669676c67f28",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 72,
"avg_line_length": 28.434782608695652,
"alnum_prop": 0.5756880733944955,
"repo_name": "devalbo/allthepairs",
"id": "2db2e603282c30c7f992865433226551c4c8ad92",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example2_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14143"
}
],
"symlink_target": ""
} |
"""Implement a hash table of a given size"""
class HashTable(object):
def __init__(self, size):
if int(size) != size:
raise TypeError(u"size must be an integer")
self.size = size
self.hlist = []
for i in range(size):
self.hlist.append([])
def get(self, key):
index = self.hash(key)
index_list = self.hlist[index]
for pair in index_list:
if pair[0] == key:
return pair[1]
def set(self, key, val):
if str(key) != key:
raise TypeError(u"key must be a string.")
index = self.hash(key)
self.hlist[index].append((key, val))
def hash(self, key):
h_index = 0
for char in key:
h_index = (h_index + ord(char)) % self.size
return h_index
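# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    table = HashTable(16)
    table.set("apple", 1)
    table.set("elppa", 2)  # same characters, so it hashes to the same bucket
    assert table.get("apple") == 1
    assert table.get("elppa") == 2
    assert table.get("missing") is None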
| {
"content_hash": "f0f57f7088cb23a3a3c13a32a5c69176",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 55,
"avg_line_length": 25.9375,
"alnum_prop": 0.5144578313253012,
"repo_name": "miracode/data-structures",
"id": "d673c0c5cfbbd3f3ab7790bc1a87311a780371a6",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hash_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89889"
}
],
"symlink_target": ""
} |
import requests
SCHEMAS_BASE_PATH='./schemas/'
#: runtime schema cache to avoid net/disk I/O
SCHEMAS = {}
def do_get_schema(endpoint, name):
'''
Download and cache schema ``name`` in memory.
'''
if name in SCHEMAS:
        return SCHEMAS[name]
url = endpoint+name
print "Downloading schema", name
SCHEMAS[name] = requests.get(url).json()
return SCHEMAS[name]
def load_schemas(endpoint):
'''
Download and installs json API schema for ``client`` and save them for
future use.
'''
root_schema = do_get_schema(endpoint, '/')
for api in root_schema['apis']:
schema_name = api['schema'].format(path=api['path'], format='json')
do_get_schema(endpoint, schema_name)
print ""
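# Usage sketch (illustrative only; the endpoint URL below is a placeholder,
# not taken from the original source):
#
#     load_schemas('https://eu.api.ovh.com/1.0')
#     root = SCHEMAS['/']
#
# After the call, the root schema and every schema it references are cached
# in SCHEMAS, so later do_get_schema() calls avoid extra HTTP requests.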
| {
"content_hash": "fc130cf4b448419f8814a42f4beb159c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 25.033333333333335,
"alnum_prop": 0.62982689747004,
"repo_name": "yadutaf/ovh-cli",
"id": "0ec22dc4655114df6bac604aff9b6920b981b9f3",
"size": "778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ovhcli/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31911"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2009 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
IRC color specifications
"""
import re
import gtk
from gettext import gettext as _
from .. import config
from ..typecheck import types
from . import code
from . import escape
from ..lib import contrast
from .. import gui
COLOR_PATTERN = "([0-9]{1,2})(,[0-9]{1,2}){0,1}.*"
COLOR_TABLE = {
0: contrast.CONTRAST_COLOR_WHITE,
1: contrast.CONTRAST_COLOR_BLACK,
2: contrast.CONTRAST_COLOR_BLUE,
3: contrast.CONTRAST_COLOR_DARK_GREEN,
4: contrast.CONTRAST_COLOR_DARK_RED,
5: contrast.CONTRAST_COLOR_LIGHT_BROWN,
6: contrast.CONTRAST_COLOR_PURPLE,
7: contrast.CONTRAST_COLOR_ORANGE,
8: contrast.CONTRAST_COLOR_YELLOW,
9: contrast.CONTRAST_COLOR_LIGHT_GREEN,
10: contrast.CONTRAST_COLOR_CYAN,
11: contrast.CONTRAST_COLOR_AQUA,
12: contrast.CONTRAST_COLOR_LIGHT_BLUE,
13: contrast.CONTRAST_COLOR_MAGENTA,
14: contrast.CONTRAST_COLOR_GREY,
15: contrast.CONTRAST_COLOR_LIGHT_GREY
}
COLOR_NAMES = {
0: _("white"),
1: _("black"),
2: _("blue"),
3: _("dark green"),
4: _("dark red"),
5: _("light brown"),
6: _("purple"),
7: _("orange"),
8: _("yellow"),
9: _("light green"),
10: _("cyan"),
11: _("aqua"),
12: _("light blue"),
13: _("magenta"),
14: _("gray"),
15: _("light gray")
}
def _get_output_bg_color():
return gui.widgets.get_object("output").get_style().base[
gtk.STATE_NORMAL]
def _get_output_fg_color():
	return gui.widgets.get_object("output").get_style().fg[gtk.STATE_NORMAL]
def get_widget_base_color(widget):
return widget.get_style().base[gtk.STATE_NORMAL]
@types (msg = basestring)
def parse_color_codes_to_tags(msg):
""" Parse the mIRC color format ^Cn[,m] and convert it
	to the internally handled <span></span> tag.
Convert the numbers n and m into contrast color codes
and use them as foreground/background.
"""
def get_gdk_color(ccolor):
return contrast.contrast_render_foreground_color(
_get_output_bg_color(), ccolor)
last_i = -1
	count = 0 # number of currently open <span> tags
# initialize attributes self.pattern / self.color_table
self = code.init_function_attrs(
parse_color_codes_to_tags,
pattern = re.compile(chr(3)+COLOR_PATTERN),
color_table = COLOR_TABLE)
while True:
try:
i = msg.index(chr(3), last_i+1)
except ValueError:
break
match = self.pattern.match(msg[i:i+6])
if match:
groups = match.groups()
tag = "<span"
if count != 0:
# close the previous color
tag = "</span>" + tag
count -= 1
try:
fg = self.color_table[int(groups[0])]
fg = get_gdk_color(fg)
except (KeyError, TypeError):
fg = None
else:
tag += " foreground='%s'" % fg
try:
bg = self.color_table[int(groups[1][1:])]
bg = get_gdk_color(bg)
except (KeyError, TypeError):
bg = None
else:
tag += " background='%s'" % bg
tag += ">"
skip_len = 1 + (groups[0] and len(groups[0]) or 0) \
+ (groups[1] and len(groups[1]) or 0)
msg = msg[:i] + tag + msg[i+skip_len:]
count += 1
else:
if count > 0:
# single ^C, if there's an open tag, close it
msg = msg[:i] + "</span>" + msg[i+1:]
count -= 1
last_i = i
if count != 0:
		# make sure the <span> is closed.
msg = msg + "</span>"
return msg
@types (s = basestring)
def parse_color_codes_to_markups(s):
""" convert color codes to color markups (%C) and escape
every % in s with %%.
"""
s = s.replace("%", "%%")
return s.replace(chr(3), "%C")
@types (s = basestring)
def parse_color_markups_to_codes(s):
""" split s for %C markups and parse the numbers following.
After parsing, return the new string.
"""
s_split = escape.unescape_split("%C", s, escape_char="%")
return chr(3).join(s_split)
@types (text = basestring)
def strip_color_codes(text):
""" strip all color codes (chr(3)) and the following numbers """
pattern = re.compile("\003([0-9]{1,2}(,[0-9]{1,2})?)?")
start = 0
while True:
result = pattern.search(text, start)
if not result:
break
text = text[:result.start()] + text[result.end():]
		start = result.start()  # continue from where the removed code was
return text
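# Illustrative example (not from the original source): with the mIRC color
# prefix chr(3), strip_color_codes(chr(3) + "04red text") removes the color
# byte together with its "04" colour argument and returns "red text".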
# Text coloring
def is_contrast_color(value):
""" checks if the given value is a contrast color or a RGB color """
return isinstance(value, basestring) and value[0] != "#"
def get_color_by_key(key, bgcolor=None):
""" get the configured color for the given key as GdkColor.
The key is defined in config section "colors".
Example: get_color_by_key("last_log") -> gtk.gdk.Color("#dddddd")
Note that str(gtk.gdk.Color("#000")) == "#000".
If bgcolor is None, the bgcolor is retrieved
by _get_output_bg_color.
"""
cvalue = config.get("colors",key)
if not bgcolor:
bgcolor = _get_output_bg_color()
if cvalue == None:
raise KeyError, "Unknown color key: '%s'" % (key)
if cvalue[0] == "#":
return gtk.gdk.Color(cvalue)
elif key == "rules_color" and config.is_default("colors",key):
return gui.widgets.get_object("output").get_style().base[
gtk.STATE_INSENSITIVE]
else:
return contrast.contrast_render_foreground_color(
bgcolor, int(cvalue))
def get_nick_color(nick):
"""
Returns a static color for the nick given.
The returned color depends on the color mapping
set in config module.
"""
def pick_nick_color(colors, nick):
return colors[sum([ord(n) for n in nick]) % len(colors)]
if not config.get_bool("tekka","color_text"):
return _get_output_fg_color()
if not config.get_bool("colors", "nick_contrast_colors"):
# pick a color out of the user defined list
colors = config.get_list("colors", "nick_colors", [])
color = pick_nick_color(colors, nick)
return color
else:
# pick a contrast color
bg_color = _get_output_bg_color()
color = pick_nick_color(contrast.colors[:-1], nick)
r = contrast.contrast_render_foreground_color(bg_color, color)
return r
def get_text_color(nick):
"""
Same as color.get_nick_color but for text and defaults
to another value (text_message)
"""
if not config.get_bool("tekka","color_text"):
return _get_output_fg_color()
colors = contrast.colors[:-1]
if not colors or not config.get_bool("tekka","color_nick_text"):
return get_color_by_key("text_message")
bg_color = _get_output_bg_color()
color = colors[sum([ord(n) for n in nick]) % len(colors)]
r = contrast.contrast_render_foreground_color(bg_color, color)
return r
def colorize_message(msgtype, message):
if not config.get_bool("tekka", "color_text"):
return message
else:
return "<font foreground='%s'>%s</font>" % (
get_color_by_key("text_%s" % msgtype),
message)
| {
"content_hash": "271f91e7380cd71cef5554d606b98695",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 74,
"avg_line_length": 25.894039735099337,
"alnum_prop": 0.669309462915601,
"repo_name": "sushi-irc/tekka",
"id": "6b1c772202da12a0329f1c356eb91029e8551622",
"size": "7820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tekka/helper/color.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "483168"
}
],
"symlink_target": ""
} |
"""
RFB protocol implementation, client side.
Override RFBClient and RFBFactory in your application.
See vncviewer.py for an example.
Reference:
http://www.realvnc.com/docs/rfbproto.pdf
(C) 2003 [email protected]
MIT License
"""
import sys
import math
from struct import pack, unpack
import pyDes
from twisted.python import usage, log
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import protocol
from twisted.application import internet, service
#~ from twisted.internet import reactor
#encoding-type
#for SetEncodings()
RAW_ENCODING = 0
COPY_RECTANGLE_ENCODING = 1
RRE_ENCODING = 2
CORRE_ENCODING = 4
HEXTILE_ENCODING = 5
ZLIB_ENCODING = 6
TIGHT_ENCODING = 7
ZLIBHEX_ENCODING = 8
ZRLE_ENCODING = 16
#0xffffff00 to 0xffffffff tight options
PSEUDO_CURSOR_ENCODING = -239
#keycodes
#for KeyEvent()
KEY_BackSpace = 0xff08
KEY_Tab = 0xff09
KEY_Return = 0xff0d
KEY_Escape = 0xff1b
KEY_Insert = 0xff63
KEY_Delete = 0xffff
KEY_Home = 0xff50
KEY_End = 0xff57
KEY_PageUp = 0xff55
KEY_PageDown = 0xff56
KEY_Left = 0xff51
KEY_Up = 0xff52
KEY_Right = 0xff53
KEY_Down = 0xff54
KEY_F1 = 0xffbe
KEY_F2 = 0xffbf
KEY_F3 = 0xffc0
KEY_F4 = 0xffc1
KEY_F5 = 0xffc2
KEY_F6 = 0xffc3
KEY_F7 = 0xffc4
KEY_F8 = 0xffc5
KEY_F9 = 0xffc6
KEY_F10 = 0xffc7
KEY_F11 = 0xffc8
KEY_F12 = 0xffc9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xffe1
KEY_ShiftRight = 0xffe2
KEY_ControlLeft = 0xffe3
KEY_ControlRight = 0xffe4
KEY_MetaLeft = 0xffe7
KEY_MetaRight = 0xffe8
KEY_AltLeft = 0xffe9
KEY_AltRight = 0xffea
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
KEY_ForwardSlash = 0x002F
KEY_BackSlash = 0x005C
KEY_SpaceBar= 0x0020
class RFBClient(Protocol):
def __init__(self):
self._packet = []
self._packet_len = 0
self._handler = self._handleInitial
self._already_expecting = 0
#------------------------------------------------------
# states used on connection startup
#------------------------------------------------------
def _handleInitial(self):
buffer = ''.join(self._packet)
if '\n' in buffer:
if buffer[:3] == 'RFB':
#~ print "rfb"
maj, min = [int(x) for x in buffer[3:-1].split('.')]
#~ print maj, min
if (maj, min) not in [(3,3), (3,7), (3,8), (3,889), (4,0)]:
log.msg("wrong protocol version, %s.%s\n", maj, min)
self.transport.loseConnection()
buffer = buffer[12:]
self.transport.write('RFB 003.003\n')
self._packet[:] = [buffer]
self._packet_len = len(buffer)
self._handler = self._handleExpected
self.expect(self._handleAuth, 4)
else:
self._packet[:] = [buffer]
self._packet_len = len(buffer)
def _handleAuth(self, block):
(auth,) = unpack("!I", block)
#~ print "auth:", auth
if auth == 0:
self.expect(self._handleConnFailed, 4)
elif auth == 1:
self._doClientInitialization()
return
elif auth == 2:
self.expect(self._handleVNCAuth, 16)
else:
log.msg("unknown auth response (%d)" % auth)
def _handleConnFailed(self, block):
(waitfor,) = unpack("!I", block)
self.expect(self._handleConnMessage, waitfor)
def _handleConnMessage(self, block):
log.msg("Connection refused: %r" % block)
def _handleVNCAuth(self, block):
self._challenge = block
self.vncRequestPassword()
self.expect(self._handleVNCAuthResult, 4)
def sendPassword(self, password):
"""send password"""
        pw = (password + '\0' * 8)[:8]  # make sure it's 8 chars long, zero padded
des = RFBDes(pw)
response = des.encrypt(self._challenge)
self.transport.write(response)
def _handleVNCAuthResult(self, block):
(result,) = unpack("!I", block)
#~ print "auth:", auth
if result == 0: #OK
self._doClientInitialization()
return
elif result == 1: #failed
            self.vncAuthFailed("authentication failed")
self.transport.loseConnection()
elif result == 2: #too many
self.vncAuthFailed("too many tries to log in")
self.transport.loseConnection()
else:
log.msg("unknown auth response (%d)" % result)
def _doClientInitialization(self):
self.transport.write(pack("!B", self.factory.shared))
self.expect(self._handleServerInit, 24)
def _handleServerInit(self, block):
(self.width, self.height, pixformat, namelen) = unpack("!HH16sI", block)
(self.bpp, self.depth, self.bigendian, self.truecolor,
self.redmax, self.greenmax, self.bluemax,
self.redshift, self.greenshift, self.blueshift) = \
unpack("!BBBBHHHBBBxxx", pixformat)
self.bypp = self.bpp / 8 #calc bytes per pixel
self.expect(self._handleServerName, namelen)
def _handleServerName(self, block):
self.name = block
#callback:
self.vncConnectionMade()
self.expect(self._handleConnection, 1)
#------------------------------------------------------
# Server to client messages
#------------------------------------------------------
def _handleConnection(self, block):
(msgid,) = unpack("!B", block)
if msgid == 0:
self.expect(self._handleFramebufferUpdate, 3)
elif msgid == 2:
self.bell()
self.expect(self._handleConnection, 1)
elif msgid == 3:
self.expect(self._handleServerCutText, 7)
else:
log.msg("unknown message received (id %d)" % msgid)
self.expect(self._handleConnection, 1)
def _handleFramebufferUpdate(self, block):
(self.rectangles,) = unpack("!xH", block)
self.rectanglePos = []
self.beginUpdate()
self._doConnection()
def _doConnection(self):
if self.rectangles:
self.expect(self._handleRectangle, 12)
else:
self.commitUpdate(self.rectanglePos)
self.expect(self._handleConnection, 1)
def _handleRectangle(self, block):
(x, y, width, height, encoding) = unpack("!HHHHi", block)
if self.rectangles:
self.rectangles -= 1
self.rectanglePos.append( (x, y, width, height) )
if encoding == COPY_RECTANGLE_ENCODING:
self.expect(self._handleDecodeCopyrect, 4, x, y, width, height)
elif encoding == RAW_ENCODING:
self.expect(self._handleDecodeRAW, width*height*self.bypp, x, y, width, height)
elif encoding == HEXTILE_ENCODING:
self._doNextHextileSubrect(None, None, x, y, width, height, None, None)
elif encoding == CORRE_ENCODING:
self.expect(self._handleDecodeCORRE, 4 + self.bypp, x, y, width, height)
elif encoding == RRE_ENCODING:
self.expect(self._handleDecodeRRE, 4 + self.bypp, x, y, width, height)
#~ elif encoding == ZRLE_ENCODING:
#~ self.expect(self._handleDecodeZRLE, )
elif encoding == PSEUDO_CURSOR_ENCODING:
length = width * height * self.bypp
length += int(math.floor((width + 7.0) / 8)) * height
self.expect(self._handleDecodePsuedoCursor, length, x, y, width, height)
else:
log.msg("unknown encoding received (encoding %d)" % encoding)
self._doConnection()
else:
self._doConnection()
# --- RAW Encoding
def _handleDecodeRAW(self, block, x, y, width, height):
#TODO convert pixel format?
self.updateRectangle(x, y, width, height, block)
self._doConnection()
# --- CopyRect Encoding
def _handleDecodeCopyrect(self, block, x, y, width, height):
(srcx, srcy) = unpack("!HH", block)
self.copyRectangle(srcx, srcy, x, y, width, height)
self._doConnection()
# --- RRE Encoding
def _handleDecodeRRE(self, block, x, y, width, height):
(subrects,) = unpack("!I", block[:4])
color = block[4:]
self.fillRectangle(x, y, width, height, color)
if subrects:
self.expect(self._handleRRESubRectangles, (8 + self.bypp) * subrects, x, y)
else:
self._doConnection()
def _handleRRESubRectangles(self, block, topx, topy):
#~ print "_handleRRESubRectangle"
pos = 0
end = len(block)
sz = self.bypp + 8
format = "!%dsHHHH" % self.bypp
while pos < end:
(color, x, y, width, height) = unpack(format, block[pos:pos+sz])
self.fillRectangle(topx + x, topy + y, width, height, color)
pos += sz
self._doConnection()
# --- CoRRE Encoding
def _handleDecodeCORRE(self, block, x, y, width, height):
(subrects,) = unpack("!I", block[:4])
color = block[4:]
self.fillRectangle(x, y, width, height, color)
if subrects:
self.expect(self._handleDecodeCORRERectangles, (4 + self.bypp)*subrects, x, y)
else:
self._doConnection()
def _handleDecodeCORRERectangles(self, block, topx, topy):
#~ print "_handleDecodeCORRERectangle"
pos = 0
end = len(block)
sz = self.bypp + 4
format = "!%dsBBBB" % self.bypp
while pos < sz:
(color, x, y, width, height) = unpack(format, block[pos:pos+sz])
self.fillRectangle(topx + x, topy + y, width, height, color)
pos += sz
self._doConnection()
    # --- Hextile Encoding
def _doNextHextileSubrect(self, bg, color, x, y, width, height, tx, ty):
#~ print "_doNextHextileSubrect %r" % ((color, x, y, width, height, tx, ty), )
        # coords of next tile
        # tiles are processed line after line
        # finished when the last line is completely received
        # don't increment the first time
if tx is not None:
#calc next subrect pos
tx += 16
if tx >= x + width:
tx = x
ty += 16
else:
tx = x
ty = y
#more tiles?
if ty >= y + height:
self._doConnection()
else:
self.expect(self._handleDecodeHextile, 1, bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextile(self, block, bg, color, x, y, width, height, tx, ty):
(subencoding,) = unpack("!B", block)
#calc tile size
tw = th = 16
if x + width - tx < 16: tw = x + width - tx
if y + height - ty < 16: th = y + height- ty
#decode tile
if subencoding & 1: #RAW
self.expect(self._handleDecodeHextileRAW, tw*th*self.bypp, bg, color, x, y, width, height, tx, ty, tw, th)
else:
numbytes = 0
if subencoding & 2: #BackgroundSpecified
numbytes += self.bypp
if subencoding & 4: #ForegroundSpecified
numbytes += self.bypp
if subencoding & 8: #AnySubrects
numbytes += 1
if numbytes:
self.expect(self._handleDecodeHextileSubrect, numbytes, subencoding, bg, color, x, y, width, height, tx, ty, tw, th)
else:
self.fillRectangle(tx, ty, tw, th, bg)
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrect(self, block, subencoding, bg, color, x, y, width, height, tx, ty, tw, th):
subrects = 0
pos = 0
if subencoding & 2: #BackgroundSpecified
bg = block[:self.bypp]
pos += self.bypp
self.fillRectangle(tx, ty, tw, th, bg)
if subencoding & 4: #ForegroundSpecified
color = block[pos:pos+self.bypp]
pos += self.bypp
if subencoding & 8: #AnySubrects
#~ (subrects, ) = unpack("!B", block)
subrects = ord(block[pos])
#~ print subrects
if subrects:
if subencoding & 16: #SubrectsColoured
self.expect(self._handleDecodeHextileSubrectsColoured, (self.bypp + 2)*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
else:
self.expect(self._handleDecodeHextileSubrectsFG, 2*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
else:
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileRAW(self, block, bg, color, x, y, width, height, tx, ty, tw, th):
"""the tile is in raw encoding"""
self.updateRectangle(tx, ty, tw, th, block)
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrectsColoured(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
"""subrects with their own color"""
sz = self.bypp + 2
pos = 0
end = len(block)
while pos < end:
pos2 = pos + self.bypp
color = block[pos:pos2]
xy = ord(block[pos2])
wh = ord(block[pos2+1])
sx = xy >> 4
sy = xy & 0xf
sw = (wh >> 4) + 1
sh = (wh & 0xf) + 1
self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
pos += sz
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrectsFG(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
"""all subrect with same color"""
pos = 0
end = len(block)
while pos < end:
xy = ord(block[pos])
wh = ord(block[pos+1])
sx = xy >> 4
sy = xy & 0xf
sw = (wh >> 4) + 1
sh = (wh & 0xf) + 1
self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
pos += 2
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
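    # Illustrative note (not in the original source): each hextile subrect is
    # packed into two nibble bytes. For example xy = 0x25 and wh = 0x13 decode
    # to sx = 2, sy = 5, sw = 1 + 1 = 2, sh = 3 + 1 = 4, i.e. a 2x4
    # subrectangle at offset (2, 5) inside the 16x16 tile.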
# --- ZRLE Encoding
def _handleDecodeZRLE(self, block):
raise NotImplementedError
# --- Pseudo Cursor Encoding
def _handleDecodePsuedoCursor(self, block, x, y, width, height):
split = width * height * self.bypp
image = block[:split]
mask = block[split:]
self.updateCursor(x, y, width, height, image, mask)
self._doConnection()
# --- other server messages
def _handleServerCutText(self, block):
(length, ) = unpack("!xxxI", block)
self.expect(self._handleServerCutTextValue, length)
def _handleServerCutTextValue(self, block):
self.copy_text(block)
self.expect(self._handleConnection, 1)
#------------------------------------------------------
    # incoming data redirector
#------------------------------------------------------
def dataReceived(self, data):
#~ sys.stdout.write(repr(data) + '\n')
#~ print len(data), ", ", len(self._packet)
self._packet.append(data)
self._packet_len += len(data)
self._handler()
def _handleExpected(self):
if self._packet_len >= self._expected_len:
buffer = ''.join(self._packet)
while len(buffer) >= self._expected_len:
self._already_expecting = 1
block, buffer = buffer[:self._expected_len], buffer[self._expected_len:]
#~ log.msg("handle %r with %r\n" % (block, self._expected_handler.__name__))
self._expected_handler(block, *self._expected_args, **self._expected_kwargs)
self._packet[:] = [buffer]
self._packet_len = len(buffer)
self._already_expecting = 0
def expect(self, handler, size, *args, **kwargs):
#~ log.msg("expect(%r, %r, %r, %r)\n" % (handler.__name__, size, args, kwargs))
self._expected_handler = handler
self._expected_len = size
self._expected_args = args
self._expected_kwargs = kwargs
if not self._already_expecting:
self._handleExpected() #just in case that there is already enough data
#------------------------------------------------------
# client -> server messages
#------------------------------------------------------
def setPixelFormat(self, bpp=32, depth=24, bigendian=0, truecolor=1, redmax=255, greenmax=255, bluemax=255, redshift=0, greenshift=8, blueshift=16):
pixformat = pack("!BBBBHHHBBBxxx", bpp, depth, bigendian, truecolor, redmax, greenmax, bluemax, redshift, greenshift, blueshift)
self.transport.write(pack("!Bxxx16s", 0, pixformat))
        #remember these settings
self.bpp, self.depth, self.bigendian, self.truecolor = bpp, depth, bigendian, truecolor
self.redmax, self.greenmax, self.bluemax = redmax, greenmax, bluemax
self.redshift, self.greenshift, self.blueshift = redshift, greenshift, blueshift
self.bypp = self.bpp / 8 #calc bytes per pixel
#~ print self.bypp
def setEncodings(self, list_of_encodings):
self.transport.write(pack("!BxH", 2, len(list_of_encodings)))
for encoding in list_of_encodings:
self.transport.write(pack("!i", encoding))
def framebufferUpdateRequest(self, x=0, y=0, width=None, height=None, incremental=0):
if width is None: width = self.width - x
if height is None: height = self.height - y
self.transport.write(pack("!BBHHHH", 3, incremental, x, y, width, height))
def keyEvent(self, key, down=1):
"""For most ordinary keys, the "keysym" is the same as the corresponding ASCII value.
Other common keys are shown in the KEY_ constants."""
self.transport.write(pack("!BBxxI", 4, down, key))
def pointerEvent(self, x, y, buttonmask=0):
"""Indicates either pointer movement or a pointer button press or release. The pointer is
now at (x-position, y-position), and the current state of buttons 1 to 8 are represented
by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed).
"""
self.transport.write(pack("!BBHH", 5, buttonmask, x, y))
def clientCutText(self, message):
"""The client has new ASCII text in its cut buffer.
(aka clipboard)
"""
self.transport.write(pack("!BxxxI", 6, len(message)) + message)
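    # Illustrative sketch (not part of the original class): a derived client
    # typically wires these requests into the callbacks below, e.g.
    #
    #     def vncConnectionMade(self):
    #         self.setPixelFormat()
    #         self.setEncodings([RAW_ENCODING, COPY_RECTANGLE_ENCODING])
    #         self.framebufferUpdateRequest()
    #
    #     def commitUpdate(self, rectangles=None):
    #         self.framebufferUpdateRequest(incremental=1)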
#------------------------------------------------------
# callbacks
# override these in your application
#------------------------------------------------------
def vncConnectionMade(self):
"""connection is initialized and ready.
        typically, the pixel format is set here."""
def vncRequestPassword(self):
"""a password is needed to log on, use sendPassword() to
send one."""
if self.factory.password is None:
log.msg("need a password")
self.transport.loseConnection()
return
self.sendPassword(self.factory.password)
def vncAuthFailed(self, reason):
"""called when the authentication failed.
the connection is closed."""
log.msg("Cannot connect: %s" % reason)
def beginUpdate(self):
"""called before a series of updateRectangle(),
copyRectangle() or fillRectangle()."""
def commitUpdate(self, rectangles=None):
"""called after a series of updateRectangle(), copyRectangle()
or fillRectangle() are finished.
        typically, here is the place to request the next screen
        update with framebufferUpdateRequest(incremental=1).
argument is a list of tuples (x,y,w,h) with the updated
rectangles."""
def updateRectangle(self, x, y, width, height, data):
"""new bitmap data. data is a string in the pixel format set
up earlier."""
def copyRectangle(self, srcx, srcy, x, y, width, height):
"""used for copyrect encoding. copy the given rectangle
        (srcx, srcy, width, height) to the target coords (x,y)"""
def fillRectangle(self, x, y, width, height, color):
"""fill the area with the color. the color is a string in
the pixel format set up earlier"""
        # fallback variant, use updateRectangle
#override with specialized function for better performance
self.updateRectangle(x, y, width, height, color*width*height)
def updateCursor(self, x, y, width, height, image, mask):
""" New cursor, focuses at (x, y)
"""
def bell(self):
"""bell"""
def copy_text(self, text):
"""The server has new ASCII text in its cut buffer.
(aka clipboard)"""
class RFBFactory(protocol.ClientFactory):
"""A factory for remote frame buffer connections."""
# the class of the protocol to build
    # should be overridden by the application to use a derived class
protocol = RFBClient
def __init__(self, password = None, shared = 0):
self.password = password
self.shared = shared
class RFBDes(pyDes.des):
def setKey(self, key):
"""RFB protocol for authentication requires client to encrypt
challenge sent by server with password using DES method. However,
bits in each byte of the password are put in reverse order before
using it as encryption key."""
newkey = []
for ki in range(len(key)):
bsrc = ord(key[ki])
btgt = 0
for i in range(8):
if bsrc & (1 << i):
btgt = btgt | (1 << 7-i)
newkey.append(chr(btgt))
super(RFBDes, self).setKey(newkey)
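    # Illustrative example (not in the original source): the byte 0x61
    # (0b01100001, ASCII 'a') becomes 0b10000110 = 0x86 after the reversal
    # above, matching the bit-order quirk described in the docstring.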
# --- test code only, see vncviewer.py
if __name__ == '__main__':
class RFBTest(RFBClient):
"""dummy client"""
def vncConnectionMade(self):
print "Screen format: depth=%d bytes_per_pixel=%r" % (self.depth, self.bpp)
print "Desktop name: %r" % self.name
            self.setEncodings([RAW_ENCODING])
            self.framebufferUpdateRequest()
def updateRectangle(self, x, y, width, height, data):
print "%s " * 5 % (x, y, width, height, repr(data[:20]))
class RFBTestFactory(protocol.ClientFactory):
"""test factory"""
protocol = RFBTest
def clientConnectionLost(self, connector, reason):
print reason
from twisted.internet import reactor
reactor.stop()
#~ connector.connect()
def clientConnectionFailed(self, connector, reason):
print "connection failed:", reason
from twisted.internet import reactor
reactor.stop()
class Options(usage.Options):
"""command line options"""
optParameters = [
['display', 'd', '0', 'VNC display'],
['host', 'h', 'localhost', 'remote hostname'],
['outfile', 'o', None, 'Logfile [default: sys.stdout]'],
]
o = Options()
try:
o.parseOptions()
except usage.UsageError, errortext:
print "%s: %s" % (sys.argv[0], errortext)
print "%s: Try --help for usage details." % (sys.argv[0])
raise SystemExit, 1
logFile = sys.stdout
if o.opts['outfile']:
logFile = o.opts['outfile']
log.startLogging(logFile)
host = o.opts['host']
port = int(o.opts['display']) + 5900
application = service.Application("rfb test") # create Application
# connect to this host and port, and reconnect if we get disconnected
vncClient = internet.TCPClient(host, port, RFBFactory()) # create the service
vncClient.setServiceParent(application)
    # this file should be run as 'twistd -y rfb.py', but that didn't work -
    # crippled_des.py could not be imported, so this hack is used instead.
    # now that crippled_des.py has been replaced with pyDes this may no
    # longer apply.
from twisted.internet import reactor
vncClient.startService()
reactor.run()
| {
"content_hash": "c7fbe20a78fa1c74ebdd487408a1c59f",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 154,
"avg_line_length": 36.58565153733529,
"alnum_prop": 0.5641107731711221,
"repo_name": "f0r34chb3t4/vncdotool",
"id": "3709c187e19caf7da1d1d9ad91573b05b32522f5",
"size": "24988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vncdotool/rfb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "160129"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
from pycoin.tx import Tx
from pycoin.tx.script.tools import disassemble
def main():
tx = Tx.tx_from_hex(sys.argv[1])
print('Input Scripts:')
for inp in tx.txs_in:
print(' - ' + disassemble(inp.script))
print('Output Scripts:')
for out in tx.txs_out:
print(' - ' + disassemble(out.script))
if __name__ == '__main__':
main()
| {
"content_hash": "778817299771ba2884ee5fcfbadd3daa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 46,
"avg_line_length": 24.41176470588235,
"alnum_prop": 0.6168674698795181,
"repo_name": "bit-oasis/multisig-core",
"id": "d9538b4a56978621197577ca7a375af3e605bb54",
"size": "433",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "multisigcore/scripts/decode_tx_scripts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77905"
}
],
"symlink_target": ""
} |
from django import forms
from meadery.models import Product
from inventory.models import Warehouse, Row, Shelf, Bin, Crate, Jar
class WarehouseAdminForm(forms.ModelForm):
class Meta:
model = Warehouse
exclude = ["slug"]
class RowAdminForm(forms.ModelForm):
warehouse = forms.ModelChoiceField(queryset=Warehouse.objects)
class Meta:
model = Row
exclude = ["slug"]
class ShelfAdminForm(forms.ModelForm):
row = forms.ModelChoiceField(queryset=Row.objects)
class Meta:
model = Shelf
exclude = ["slug"]
class BinAdminForm(forms.ModelForm):
shelf = forms.ModelChoiceField(queryset=Shelf.objects)
class Meta:
model = Bin
exclude = ["slug"]
class CrateAdminForm(forms.ModelForm):
bin = forms.ModelChoiceField(queryset=Bin.objects)
class Meta:
model = Crate
exclude = ["slug"]
class JarAdminForm(forms.ModelForm):
product = forms.ModelChoiceField(queryset=Product.active)
class Meta:
model = Jar
exclude = ["slug"]
| {
"content_hash": "a90782e4664dbc01511b6336319750a0",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 67,
"avg_line_length": 21.693877551020407,
"alnum_prop": 0.6688617121354656,
"repo_name": "mathuin/pyment",
"id": "48fa4863d037de15ba5c7577d6d9d6a2c1c8abea",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django/inventory/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7253"
},
{
"name": "CSS",
"bytes": "3691"
},
{
"name": "Dockerfile",
"bytes": "325"
},
{
"name": "HTML",
"bytes": "17986"
},
{
"name": "JavaScript",
"bytes": "1995"
},
{
"name": "Makefile",
"bytes": "7418"
},
{
"name": "Python",
"bytes": "239563"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
} |
''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : [email protected] '''
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
MEDIUM_SIZE = 18
font = {'family': 'sans-serif',
'weight': 'bold'}
matplotlib.rc('xtick', labelsize=MEDIUM_SIZE)
matplotlib.rc('ytick', labelsize=MEDIUM_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
# matplotlib.rc('font', **font)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
class Plotter():
def __init__(self):
import itertools
# plt.figure(figsize=(12, 9))
self.marker = itertools.cycle(('o', '+', "v", "^", "8", '.', '*'))
self.handles = []
self.lines = itertools.cycle(('--', '-.', '-', ':'))
def plot(self, x, y, xLabel="Number of Classes", yLabel="Accuracy %", legend="none", title=None, error=None):
self.x = x
self.y = y
plt.grid(color='0.89', linestyle='--', linewidth=1.0)
if error is None:
l, = plt.plot(x, y, linestyle=next(self.lines), marker=next(self.marker), label=legend, linewidth=3.0)
else:
l = plt.errorbar(x, y, yerr=error, capsize=4.0, capthick=2.0, linestyle=next(self.lines),
marker=next(self.marker), label=legend, linewidth=3.0)
self.handles.append(l)
self.x_label = xLabel
self.y_label = yLabel
if title is not None:
plt.title(title)
def save_fig(self, path, xticks=105, title=None, yStart=0, xRange=0, yRange=10):
if title is not None:
plt.title(title)
plt.legend(handles=self.handles)
plt.ylim((yStart, 100 + 0.2))
plt.xlim((0, xticks + .2))
plt.ylabel(self.y_label)
plt.xlabel(self.x_label)
plt.yticks(list(range(yStart, 101, yRange)))
print(list(range(yStart, 105, yRange)))
plt.xticks(list(range(0, xticks + 1, xRange + int(xticks / 10))))
plt.savefig(path + ".eps", format='eps')
plt.gcf().clear()
def save_fig2(self, path, xticks=105):
plt.legend(handles=self.handles)
plt.xlabel("Memory Budget")
plt.ylabel("Average Incremental Accuracy")
plt.savefig(path + ".jpg")
plt.gcf().clear()
def plotMatrix(self, epoch, path, img):
plt.imshow(img, cmap='plasma', interpolation='nearest')
plt.colorbar()
plt.savefig(path + str(epoch) + ".svg", format='svg')
plt.gcf().clear()
def saveImage(self, img, path, epoch):
from PIL import Image
im = Image.fromarray(img)
im.save(path + str(epoch) + ".jpg")
if __name__ == "__main__":
pl = Plotter()
pl.plot([1, 2, 3, 4], [2, 3, 6, 2])
pl.save_fig("test.jpg")
| {
"content_hash": "e5a138f3672e720828c4920b66829fa7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 114,
"avg_line_length": 32.75,
"alnum_prop": 0.5857043719639139,
"repo_name": "Khurramjaved96/Recursive-CNNs",
"id": "9aff5238fc545dc436a2ab25c8dfb7e66600d023",
"size": "2882",
"binary": false,
"copies": "1",
"ref": "refs/heads/Pytorch-RecursiveCNN",
"path": "plotter/plotter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86476"
}
],
"symlink_target": ""
} |
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr ([email protected])'
import cgi
import logging
import random
import urllib
import messages
import yaml
from common import safe_dom
from common import tags
from common import utils as common_utils
from common.schema_fields import FieldArray
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
from controllers import sites
from controllers import utils
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import models as m_models
from models import review
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'
# The editor has severe limitations for editing nested lists of objects. First,
# it does not allow one to move a lesson from one unit to another. We need a way
# of doing that. Second, JSON schema specification does not seem to support a
# type-safe array, which has objects of different types. We also want that
# badly :). All in all - using generic schema-based object editor for editing
# nested arrayable polymorphic attributes is a pain...
STATUS_ANNOTATION = oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', DRAFT_TEXT,
PUBLISHED_TEXT, class_name='split-from-main-group')
def generate_common_schema(title):
common = FieldRegistry(title)
common.add_property(SchemaField(
'key', 'ID', 'string', editable=False,
extra_schema_dict_values={'className': 'inputEx-Field keyHolder'}))
common.add_property(
SchemaField('type', 'Type', 'string', editable=False))
common.add_property(
SchemaField('title', 'Title', 'string', optional=True))
common.add_property(
SchemaField('description', 'Description', 'string', optional=True))
# Label Groups
label = FieldRegistry(None, description='label')
label.add_property(SchemaField('id', 'ID', 'integer',
hidden=True,
editable=False))
label.add_property(SchemaField('checked', None, 'boolean'))
label.add_property(SchemaField('title', None, 'string',
optional=True,
editable=False))
label.add_property(SchemaField('description', None, 'string',
optional=True,
editable=False,
extra_schema_dict_values={
'className': 'label-description'}))
label.add_property(SchemaField('no_labels', None, 'string',
optional=True,
editable=False,
extra_schema_dict_values={
'className': 'label-none-in-group'}))
label_group = FieldRegistry('', description='label groups')
label_group.add_property(SchemaField('title', None, 'string',
editable=False))
label_group.add_property(FieldArray('labels', None,
item_type=label,
extra_schema_dict_values={
'className': 'label-group'}))
common.add_property(
FieldArray('label_groups', 'Labels',
item_type=label_group,
extra_schema_dict_values={
'className': 'inputEx-Field label-group-list',
}))
# Public/Draft status
common.add_property(SchemaField('is_draft', 'Status', 'boolean',
select_data=[(True, DRAFT_TEXT),
(False, PUBLISHED_TEXT)],
extra_schema_dict_values={
'className': 'split-from-main-group'}))
return common
# Allowed matchers. Keys of this dict represent internal keys for the matcher
# type, and the value represents the corresponding string that will appear in
# the dashboard UI.
ALLOWED_MATCHERS_NAMES = {review.PEER_MATCHER: messages.PEER_MATCHER_NAME}
# Allowed graders. Keys of this dict represent internal keys for the grader
# type, and the value represents the corresponding string that will appear in
# the dashboard UI.
ALLOWED_GRADERS_NAMES = {
courses.AUTO_GRADER: messages.AUTO_GRADER_NAME,
courses.HUMAN_GRADER: messages.HUMAN_GRADER_NAME,
}
class CourseOutlineRights(object):
"""Manages view/edit rights for course outline."""
@classmethod
def can_view(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
"""An editor for the unit and lesson titles."""
def get_import_course(self):
"""Shows setup form for course import."""
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
template_values['page_title_linked'] = self.format_title(
'Import Course')
annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
if not annotations:
template_values['main_content'] = 'No courses to import from.'
self.render_page(template_values)
return
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
ImportCourseRESTHandler.SCHEMA_JSON,
annotations,
None, rest_url, exit_url,
auto_return=True,
save_button_caption='Import',
required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Import Course')
template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit_lesson(self):
"""Shows editor for the list of unit and lesson titles."""
key = self.request.get('key')
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self,
UnitLessonTitleRESTHandler.SCHEMA_JSON,
UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,
key, rest_url, exit_url,
required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Course Outline')
template_values[
'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
def post_add_lesson(self):
"""Adds new lesson to a first unit of the course."""
course = courses.Course(self)
first_unit = None
for unit in course.get_units():
if unit.type == verify.UNIT_TYPE_UNIT:
first_unit = unit
break
if first_unit:
lesson = course.add_lesson(first_unit)
course.save()
# TODO(psimakov): complete 'edit_lesson' view
self.redirect(self.get_action_url(
'edit_lesson', key=lesson.lesson_id,
extra_args={'is_newly_created': 1}))
else:
self.redirect('/dashboard')
def post_add_unit(self):
"""Adds new unit to a course."""
course = courses.Course(self)
unit = course.add_unit()
course.save()
self.redirect(self.get_action_url(
'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))
def post_add_link(self):
"""Adds new link to a course."""
course = courses.Course(self)
link = course.add_link()
link.href = ''
course.save()
self.redirect(self.get_action_url(
'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))
def post_add_assessment(self):
"""Adds new assessment to a course."""
course = courses.Course(self)
assessment = course.add_assessment()
course.save()
self.redirect(self.get_action_url(
'edit_assessment', key=assessment.unit_id,
extra_args={'is_newly_created': 1}))
def post_set_draft_status(self):
"""Sets the draft status of a course component.
Only works with CourseModel13 courses, but the REST handler
is only called with this type of courses.
"""
key = self.request.get('key')
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
component_type = self.request.get('type')
if component_type == 'unit':
course_component = course.find_unit_by_id(key)
elif component_type == 'lesson':
course_component = course.find_lesson_by_id(None, key)
else:
transforms.send_json_response(
self, 401, 'Invalid key.', {'key': key})
return
set_draft = self.request.get('set_draft')
if set_draft == '1':
set_draft = True
elif set_draft == '0':
set_draft = False
else:
transforms.send_json_response(
self, 401, 'Invalid set_draft value, expected 0 or 1.',
{'set_draft': set_draft}
)
return
course_component.now_available = not set_draft
course.save()
transforms.send_json_response(
self,
200,
'Draft status set to %s.' % (
DRAFT_TEXT if set_draft else PUBLISHED_TEXT
), {
'is_draft': set_draft
}
)
return
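    # Illustrative note: a draft-status request therefore carries three query
    # parameters, e.g. ?key=<unit_or_lesson_id>&type=unit&set_draft=1, where
    # set_draft=1 marks the component private and set_draft=0 publishes it
    # (the key value shown is hypothetical).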
def _render_edit_form_for(
self, rest_handler_cls, title, annotations_dict=None,
delete_xsrf_token='delete-unit', page_description=None,
extra_js_files=None):
"""Renders an editor form for a given REST handler class."""
if not annotations_dict:
annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT
key = self.request.get('key')
extra_args = {}
if self.request.get('is_newly_created'):
extra_args['is_newly_created'] = 1
exit_url = self.canonicalize_url('/dashboard')
rest_url = self.canonicalize_url(rest_handler_cls.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(rest_handler_cls.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(delete_xsrf_token))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self,
rest_handler_cls.SCHEMA_JSON,
annotations_dict,
key, rest_url, exit_url,
extra_args=extra_args,
delete_url=delete_url, delete_method='delete',
read_only=not self.app_context.is_editable_fs(),
required_modules=rest_handler_cls.REQUIRED_MODULES,
extra_js_files=extra_js_files)
template_values = {}
template_values['page_title'] = self.format_title('Edit %s' % title)
if page_description:
template_values['page_description'] = page_description
template_values['main_content'] = form_html
self.render_page(template_values)
def get_edit_unit(self):
"""Shows unit editor."""
self._render_edit_form_for(
UnitRESTHandler, 'Unit',
page_description=messages.UNIT_EDITOR_DESCRIPTION,
annotations_dict=UnitRESTHandler.get_annotations_dict(
courses.Course(self), int(self.request.get('key'))))
def get_edit_link(self):
"""Shows link editor."""
self._render_edit_form_for(
LinkRESTHandler, 'Link',
page_description=messages.LINK_EDITOR_DESCRIPTION)
def get_edit_assessment(self):
"""Shows assessment editor."""
self._render_edit_form_for(
AssessmentRESTHandler, 'Assessment',
page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION,
extra_js_files=['assessment_editor_lib.js', 'assessment_editor.js'])
def get_edit_lesson(self):
"""Shows the lesson/activity editor."""
self._render_edit_form_for(
LessonRESTHandler, 'Lessons and Activities',
annotations_dict=LessonRESTHandler.get_schema_annotations_dict(
courses.Course(self).get_units()),
delete_xsrf_token='delete-lesson',
extra_js_files=LessonRESTHandler.EXTRA_JS_FILES)
class CommonUnitRESTHandler(BaseRESTHandler):
"""A common super class for all unit REST handlers."""
def unit_to_dict(self, unused_unit):
"""Converts a unit to a dictionary representation."""
raise Exception('Not implemented')
def apply_updates(
self, unused_unit, unused_updated_unit_dict, unused_errors):
"""Applies changes to a unit; modifies unit input argument."""
raise Exception('Not implemented')
def unit_to_dict_common(self, unit):
return {
'key': unit.unit_id,
'type': verify.UNIT_TYPE_NAMES[unit.type],
'title': unit.title,
'description': unit.description or '',
'is_draft': not unit.now_available,
'label_groups': self.labels_to_dict(unit),
}
def labels_to_dict(self, unit):
course = courses.Course(self)
parent_unit = course.get_parent_unit(unit.unit_id)
all_labels = m_models.LabelDAO.get_all()
unit_labels = common_utils.text_to_list(unit.labels)
label_groups = []
for label_type in sorted(m_models.LabelDTO.LABEL_TYPES,
lambda a, b: cmp(a.menu_order, b.menu_order)):
# If unit has a parent, don't even bother showing the UI elements
# for setting tracks.
if (parent_unit and
label_type.type == m_models.LabelDTO.LABEL_TYPE_COURSE_TRACK):
continue
label_group = []
for label in sorted(all_labels, lambda a, b: cmp(a.title, b.title)):
if label.type == label_type.type:
label_group.append({
'id': label.id,
'title': label.title,
'description': label.description,
'checked': str(label.id) in unit_labels,
'no_labels': '',
})
if not label_group:
label_group.append({
'id': -1,
'title': '',
'description': '',
'checked': False,
'no_labels': '-- No labels of this type --',
})
label_groups.append({
'title': label_type.title,
'labels': label_group,
})
return label_groups
def apply_updates_common(self, course, unit, updated_unit_dict, errors):
"""Apply changes common to all unit types."""
unit.title = updated_unit_dict.get('title')
unit.description = updated_unit_dict.get('description')
unit.now_available = not updated_unit_dict.get('is_draft')
labels = set()
for label_group in updated_unit_dict['label_groups']:
for label in label_group['labels']:
if label['checked'] and label['id'] > 0:
labels.add(label['id'])
if course.get_parent_unit(unit.unit_id):
track_label_ids = m_models.LabelDAO.get_set_of_ids_of_type(
m_models.LabelDTO.LABEL_TYPE_COURSE_TRACK)
if track_label_ids.intersection(labels):
errors.append('Cannot set track labels on entities which '
'are used within other units.')
unit.labels = common_utils.list_to_text(labels)
def get(self):
"""A GET REST method shared by all unit types."""
key = self.request.get('key')
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
message = ['Success.']
if self.request.get('is_newly_created'):
unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
message.append(
'New %s has been created and saved.' % unit_type)
transforms.send_json_response(
self, 200, '\n'.join(message),
payload_dict=self.unit_to_dict(unit),
xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))
def put(self):
"""A PUT REST method shared by all unit types."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'put-unit', {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
unit = courses.Course(self).find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
updated_unit_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
errors = []
self.apply_updates(unit, updated_unit_dict, errors)
if not errors:
course = courses.Course(self)
assert course.update_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Saved.')
else:
transforms.send_json_response(self, 412, '\n'.join(errors))
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-unit', {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
unit = course.find_unit_by_id(key)
if not unit:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
course.delete_unit(unit)
course.save()
transforms.send_json_response(self, 200, 'Deleted.')
def generate_unit_schema():
schema = generate_common_schema('Unit')
schema.add_property(SchemaField(
'unit_header', 'Unit Header', 'html', optional=True,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.DESCRIPTIVE_SCOPE,
'className': 'inputEx-Field html-content'}))
schema.add_property(SchemaField(
'pre_assessment', 'Pre Assessment', 'integer', optional=True))
schema.add_property(SchemaField(
'post_assessment', 'Post Assessment', 'integer', optional=True))
schema.add_property(SchemaField(
'show_contents_on_one_page', 'Show Contents on One Page', 'boolean',
optional=True,
description='Whether to show all assessments, lessons, and activities '
'in a Unit on one page, or to show each on its own page.'))
schema.add_property(SchemaField(
'manual_progress', 'Manual Progress', 'boolean', optional=True,
description='When set, the manual progress REST API permits '
'users to manually mark a unit or lesson as complete, '
'overriding the automatic progress tracking.'))
schema.add_property(SchemaField(
'unit_footer', 'Unit Footer', 'html', optional=True,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.DESCRIPTIVE_SCOPE,
'className': 'inputEx-Field html-content'}))
return schema
class UnitRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to unit."""
URI = '/rest/course/unit'
SCHEMA = generate_unit_schema()
SCHEMA_JSON = SCHEMA.get_json_schema()
SCHEMA_DICT = SCHEMA.get_json_schema_dict()
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable',
'inputex-list', 'inputex-hidden', 'inputex-number', 'inputex-integer',
'inputex-checkbox', 'gcb-rte']
@classmethod
def get_annotations_dict(cls, course, this_unit_id):
# The set of available assessments needs to be dynamically
# generated and set as selection choices on the form.
# We want to only show assessments that are not already
# selected by other units.
available_assessments = {}
referenced_assessments = {}
for unit in course.get_units():
if unit.type == verify.UNIT_TYPE_ASSESSMENT:
model_version = course.get_assessment_model_version(unit)
track_labels = course.get_unit_track_labels(unit)
# Don't allow selecting old-style assessments, which we
# can't display within a Unit page.
# Don't allow selection of assessments with parents.
if (model_version != courses.ASSESSMENT_MODEL_VERSION_1_4 and
not track_labels):
available_assessments[unit.unit_id] = unit
elif (unit.type == verify.UNIT_TYPE_UNIT and
this_unit_id != unit.unit_id):
if unit.pre_assessment:
referenced_assessments[unit.pre_assessment] = True
if unit.post_assessment:
referenced_assessments[unit.post_assessment] = True
for referenced in referenced_assessments:
if referenced in available_assessments:
del available_assessments[referenced]
schema = generate_unit_schema()
choices = [(-1, '-- None --')]
for assessment_id in sorted(available_assessments):
choices.append(
(assessment_id, available_assessments[assessment_id].title))
schema.get_property('pre_assessment').set_select_data(choices)
schema.get_property('post_assessment').set_select_data(choices)
return schema.get_schema_dict()
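    # Illustrative note: the select choices built above end up as a list of
    # (unit_id, title) pairs with a leading "none" entry, e.g.
    # [(-1, '-- None --'), (7, 'Midterm exam')], where the id and title shown
    # are hypothetical.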
def unit_to_dict(self, unit):
assert unit.type == 'U'
ret = self.unit_to_dict_common(unit)
ret['unit_header'] = unit.unit_header or ''
ret['unit_footer'] = unit.unit_footer or ''
ret['pre_assessment'] = unit.pre_assessment or -1
ret['post_assessment'] = unit.post_assessment or -1
ret['show_contents_on_one_page'] = (
unit.show_contents_on_one_page or False)
ret['manual_progress'] = unit.manual_progress or False
return ret
def _is_assessment_unused(self, course, unit, assessment, errors):
parent_unit = course.get_parent_unit(assessment.unit_id)
if parent_unit and parent_unit.unit_id != unit.unit_id:
errors.append(
'Assessment "%s" is already asssociated to unit "%s"' % (
assessment.title, parent_unit.title))
return False
return True
def _is_assessment_version_ok(self, course, assessment, errors):
# Here, we want to establish that the display model for the
# assessment is compatible with the assessment being used in
# the context of a Unit. Model version 1.4 is not, because
# the way it sets up submission is to build an entirely new form
# from JavaScript (independent of the form used to display the
# assessment), and the way it learns the ID of the assessment
# is by looking in the URL (as opposed to taking a parameter).
# This is incompatible with the URLs for unit display, so we
# just disallow older assessments here.
model_version = course.get_assessment_model_version(assessment)
if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
errors.append(
'The version of assessment "%s" ' % assessment.title +
'is not compatible with use as a pre/post unit element')
return False
return True
def _is_assessment_on_track(self, course, assessment, errors):
if course.get_unit_track_labels(assessment):
errors.append(
'Assessment "%s" has track labels, ' % assessment.title +
'so it cannot be used as a pre/post unit element')
return True
return False
def apply_updates(self, unit, updated_unit_dict, errors):
course = courses.Course(self)
self.apply_updates_common(course, unit, updated_unit_dict, errors)
unit.unit_header = updated_unit_dict['unit_header']
unit.unit_footer = updated_unit_dict['unit_footer']
unit.pre_assessment = None
unit.post_assessment = None
unit.manual_progress = updated_unit_dict['manual_progress']
pre_assessment_id = updated_unit_dict['pre_assessment']
if pre_assessment_id >= 0:
assessment = course.find_unit_by_id(pre_assessment_id)
if (self._is_assessment_unused(course, unit, assessment, errors) and
self._is_assessment_version_ok(course, assessment, errors) and
not self._is_assessment_on_track(course, assessment, errors)):
unit.pre_assessment = pre_assessment_id
post_assessment_id = updated_unit_dict['post_assessment']
if post_assessment_id >= 0 and pre_assessment_id == post_assessment_id:
errors.append(
'The same assessment cannot be used as both the pre '
'and post assessment of a unit.')
elif post_assessment_id >= 0:
assessment = course.find_unit_by_id(post_assessment_id)
if (assessment and
self._is_assessment_unused(course, unit, assessment, errors) and
self._is_assessment_version_ok(course, assessment, errors) and
not self._is_assessment_on_track(course, assessment, errors)):
unit.post_assessment = post_assessment_id
unit.show_contents_on_one_page = (
updated_unit_dict['show_contents_on_one_page'])
def generate_link_schema():
schema = generate_common_schema('Link')
schema.add_property(SchemaField(
'url', 'URL', 'string', optional=True,
description=messages.LINK_EDITOR_URL_DESCRIPTION))
return schema
class LinkRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to link."""
URI = '/rest/course/link'
SCHEMA = generate_link_schema()
SCHEMA_JSON = SCHEMA.get_json_schema()
SCHEMA_DICT = SCHEMA.get_json_schema_dict()
SCHEMA_ANNOTATIONS_DICT = SCHEMA.get_schema_dict()
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable',
'inputex-list', 'inputex-hidden', 'inputex-number', 'inputex-checkbox']
def unit_to_dict(self, unit):
assert unit.type == 'O'
ret = self.unit_to_dict_common(unit)
ret['url'] = unit.href
return ret
def apply_updates(self, unit, updated_unit_dict, errors):
course = courses.Course(self)
self.apply_updates_common(course, unit, updated_unit_dict, errors)
unit.href = updated_unit_dict.get('url')
class ImportCourseRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to course import."""
URI = '/rest/course/import'
SCHEMA_JSON = """
{
"id": "Import Course Entity",
"type": "object",
"description": "Import Course",
"properties": {
"course" : {"type": "string"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
REQUIRED_MODULES = [
'inputex-string', 'inputex-select', 'inputex-uneditable']
@classmethod
def _get_course_list(cls):
# Make a list of courses user has the rights to.
course_list = []
for acourse in sites.get_all_courses():
if not roles.Roles.is_course_admin(acourse):
continue
if acourse == sites.get_course_for_current_request():
continue
course_list.append({
'value': acourse.raw,
'label': cgi.escape(acourse.get_title())})
return course_list
@classmethod
def SCHEMA_ANNOTATIONS_DICT(cls): # pylint: disable-msg=g-bad-name
"""Schema annotations are dynamic and include a list of courses."""
course_list = cls._get_course_list()
if not course_list:
return None
# Format annotations.
return [
(['title'], 'Import Course'),
(
['properties', 'course', '_inputex'],
{
'label': 'Available Courses',
'_type': 'select',
'choices': course_list})]
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
first_course_in_dropdown = self._get_course_list()[0]['value']
transforms.send_json_response(
self, 200, None,
payload_dict={'course': first_course_in_dropdown},
xsrf_token=XsrfTokenManager.create_xsrf_token(
'import-course'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
request = transforms.loads(self.request.get('request'))
if not self.assert_xsrf_token_or_fail(
request, 'import-course', {'key': None}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
payload = request.get('payload')
course_raw = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)['course']
source = None
for acourse in sites.get_all_courses():
if acourse.raw == course_raw:
source = acourse
break
if not source:
transforms.send_json_response(
self, 404, 'Object not found.', {'raw': course_raw})
return
course = courses.Course(self)
errors = []
try:
course.import_from(source, errors)
except Exception as e: # pylint: disable-msg=broad-except
logging.exception(e)
errors.append('Import failed: %s' % e)
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
course.save()
transforms.send_json_response(self, 200, 'Imported.')
def workflow_key(key):
return 'workflow:%s' % key
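# Illustrative example: workflow_key simply namespaces a workflow setting, so
# workflow_key(courses.GRADER_KEY) would yield a string such as
# 'workflow:grader' (the exact result depends on the value of
# courses.GRADER_KEY).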
def create_assessment_registry():
"""Create the registry for course properties."""
reg = FieldRegistry('Assessment Entity', description='Assessment')
# Course level settings.
course_opts = generate_common_schema('Assessment Config')
course_opts.add_property(
SchemaField('weight', 'Weight', 'string', optional=True))
course_opts.add_property(SchemaField(
'content', 'Assessment Content', 'text', optional=True,
description=str(messages.ASSESSMENT_CONTENT_DESCRIPTION),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
course_opts.add_property(SchemaField(
'html_content', 'Assessment Content (HTML)', 'html', optional=True,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
'className': 'inputEx-Field html-content'}))
course_opts.add_property(SchemaField(
'html_check_answers', '"Check Answers" Buttons', 'boolean',
optional=True,
extra_schema_dict_values={
'className': 'inputEx-Field assessment-editor-check-answers'}))
course_opts.add_property(
SchemaField(workflow_key(courses.SUBMISSION_DUE_DATE_KEY),
'Submission Due Date', 'string', optional=True,
description=str(messages.DUE_DATE_FORMAT_DESCRIPTION)))
course_opts.add_property(
SchemaField(workflow_key(courses.GRADER_KEY), 'Grading Method',
'string',
select_data=ALLOWED_GRADERS_NAMES.items()))
reg.add_sub_registry('assessment', 'Assessment Config',
registry=course_opts)
review_opts = reg.add_sub_registry(
'review_opts', 'Review Config',
description=str(messages.ASSESSMENT_DETAILS_DESCRIPTION))
if len(ALLOWED_MATCHERS_NAMES) > 1:
review_opts.add_property(
SchemaField(workflow_key(courses.MATCHER_KEY), 'Review Matcher',
'string', optional=True,
select_data=ALLOWED_MATCHERS_NAMES.items()))
review_opts.add_property(
SchemaField(
'review_form', 'Reviewer Feedback Form', 'text', optional=True,
description=str(messages.REVIEWER_FEEDBACK_FORM_DESCRIPTION),
extra_schema_dict_values={
'className': 'inputEx-Field review-form'}))
review_opts.add_property(SchemaField(
'html_review_form', 'Reviewer Feedback Form (HTML)', 'html',
optional=True,
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
'className': 'inputEx-Field html-review-form'}))
review_opts.add_property(
SchemaField(
workflow_key(courses.REVIEW_DUE_DATE_KEY),
'Review Due Date', 'string', optional=True,
description=str(messages.REVIEW_DUE_DATE_FORMAT_DESCRIPTION)))
review_opts.add_property(
SchemaField(workflow_key(courses.REVIEW_MIN_COUNT_KEY),
'Review Min Count', 'integer', optional=True,
description=str(messages.REVIEW_MIN_COUNT_DESCRIPTION)))
review_opts.add_property(
SchemaField(workflow_key(courses.REVIEW_WINDOW_MINS_KEY),
'Review Window Timeout', 'integer', optional=True,
description=str(messages.REVIEW_TIMEOUT_IN_MINUTES)))
return reg
class AssessmentRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to assessment."""
URI = '/rest/course/assessment'
REG = create_assessment_registry()
SCHEMA_JSON = REG.get_json_schema()
SCHEMA_DICT = REG.get_json_schema_dict()
SCHEMA_ANNOTATIONS_DICT = REG.get_schema_dict()
REQUIRED_MODULES = [
'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-textarea',
'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
'inputex-checkbox', 'inputex-list']
def _get_assessment_path(self, unit):
return self.app_context.fs.impl.physical_to_logical(
courses.Course(self).get_assessment_filename(unit.unit_id))
def _get_review_form_path(self, unit):
return self.app_context.fs.impl.physical_to_logical(
courses.Course(self).get_review_form_filename(unit.unit_id))
def unit_to_dict(self, unit):
"""Assemble a dict with the unit data fields."""
assert unit.type == 'A'
path = self._get_assessment_path(unit)
fs = self.app_context.fs
if fs.isfile(path):
content = fs.get(path)
else:
content = ''
review_form_path = self._get_review_form_path(unit)
if review_form_path and fs.isfile(review_form_path):
review_form = fs.get(review_form_path)
else:
review_form = ''
workflow = unit.workflow
if workflow.get_submission_due_date():
submission_due_date = workflow.get_submission_due_date().strftime(
courses.ISO_8601_DATE_FORMAT)
else:
submission_due_date = ''
if workflow.get_review_due_date():
review_due_date = workflow.get_review_due_date().strftime(
courses.ISO_8601_DATE_FORMAT)
else:
review_due_date = ''
unit_common = self.unit_to_dict_common(unit)
unit_common.update({
'weight': str(unit.weight if hasattr(unit, 'weight') else 0),
'content': content,
'html_content': (unit.html_content or ''
if hasattr(unit, 'html_content') else ''),
'html_check_answers': (
unit.html_check_answers
if hasattr(unit, 'html_check_answers') else False),
workflow_key(courses.SUBMISSION_DUE_DATE_KEY): (
submission_due_date),
workflow_key(courses.GRADER_KEY): workflow.get_grader(),
})
return {
'assessment': unit_common,
'review_opts': {
workflow_key(courses.MATCHER_KEY): workflow.get_matcher(),
workflow_key(courses.REVIEW_DUE_DATE_KEY): review_due_date,
workflow_key(courses.REVIEW_MIN_COUNT_KEY): (
workflow.get_review_min_count()),
workflow_key(courses.REVIEW_WINDOW_MINS_KEY): (
workflow.get_review_window_mins()),
'review_form': review_form,
'html_review_form': (
unit.html_review_form or ''
if hasattr(unit, 'html_review_form') else ''),
}
}
def apply_updates(self, unit, updated_unit_dict, errors):
"""Store the updated assessment."""
course = courses.Course(self)
entity_dict = {}
AssessmentRESTHandler.REG.convert_json_to_entity(
updated_unit_dict, entity_dict)
self.apply_updates_common(course, unit, entity_dict, errors)
try:
unit.weight = int(entity_dict.get('weight'))
if unit.weight < 0:
errors.append('The weight must be a non-negative integer.')
except ValueError:
errors.append('The weight must be an integer.')
content = entity_dict.get('content')
if content:
course.set_assessment_content(
unit, entity_dict.get('content'), errors=errors)
unit.html_content = entity_dict.get('html_content')
unit.html_check_answers = entity_dict.get('html_check_answers')
workflow_dict = entity_dict.get('workflow')
if len(ALLOWED_MATCHERS_NAMES) == 1:
workflow_dict[courses.MATCHER_KEY] = (
ALLOWED_MATCHERS_NAMES.keys()[0])
unit.workflow_yaml = yaml.safe_dump(workflow_dict)
unit.workflow.validate(errors=errors)
# Only save the review form if the assessment needs human grading.
if not errors:
if course.needs_human_grader(unit):
review_form = entity_dict.get('review_form')
if review_form:
course.set_review_form(
unit, review_form, errors=errors)
unit.html_review_form = entity_dict.get('html_review_form')
elif entity_dict.get('review_form'):
errors.append(
'Review forms for auto-graded assessments should be empty.')
class UnitLessonTitleRESTHandler(BaseRESTHandler):
"""Provides REST API to unit and lesson titles."""
URI = '/rest/course/outline'
SCHEMA_JSON = """
{
"type": "object",
"description": "Course Outline",
"properties": {
"outline": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"title": {"type": "string"},
"lessons": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"title": {"type": "string"}
}
}
}
}
}
}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'Course Outline'),
(['_inputex'], {'className': 'organizer'}),
(['properties', 'outline', '_inputex'], {
'sortable': 'true',
'label': ''}),
([
'properties', 'outline', 'items',
'properties', 'title', '_inputex'], {
'_type': 'uneditable',
'label': ''}),
(['properties', 'outline', 'items', 'properties', 'id', '_inputex'], {
'_type': 'hidden'}),
(['properties', 'outline', 'items', 'properties', 'lessons',
'_inputex'], {
'sortable': 'true',
'label': '',
'listAddLabel': 'Add a new lesson',
'listRemoveLabel': 'Delete'}),
(['properties', 'outline', 'items', 'properties', 'lessons', 'items',
'properties', 'title', '_inputex'], {
'_type': 'uneditable',
'label': ''}),
(['properties', 'outline', 'items', 'properties', 'lessons', 'items',
'properties', 'id', '_inputex'], {
'_type': 'hidden'})
]
REQUIRED_MODULES = [
'inputex-hidden', 'inputex-list', 'inputex-string',
'inputex-uneditable']
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
course = courses.Course(self)
outline_data = []
for unit in course.get_units():
lesson_data = []
for lesson in course.get_lessons(unit.unit_id):
lesson_data.append({
'title': lesson.title,
'id': lesson.lesson_id})
unit_title = unit.title
if verify.UNIT_TYPE_UNIT == unit.type:
unit_title = 'Unit: %s' % unit.title
outline_data.append({
'title': unit_title,
'id': unit.unit_id,
'lessons': lesson_data})
transforms.send_json_response(
self, 200, None,
payload_dict={'outline': outline_data},
xsrf_token=XsrfTokenManager.create_xsrf_token(
'unit-lesson-reorder'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
request = transforms.loads(self.request.get('request'))
if not self.assert_xsrf_token_or_fail(
request, 'unit-lesson-reorder', {'key': None}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
payload = request.get('payload')
payload_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
course = courses.Course(self)
course.reorder_units(payload_dict['outline'])
course.save()
transforms.send_json_response(self, 200, 'Saved.')
class LessonRESTHandler(BaseRESTHandler):
"""Provides REST API to handle lessons and activities."""
URI = '/rest/course/lesson'
# Note GcbRte relies on the structure of this schema. Do not change without
# checking the dependency.
SCHEMA_JSON = """
{
"id": "Lesson Entity",
"type": "object",
"description": "Lesson",
"properties": {
"key" : {"type": "string"},
"title" : {"type": "string"},
"unit_id": {"type": "string"},
"video" : {"type": "string", "optional": true},
"scored": {"type": "string"},
"objectives" : {
"type": "string", "format": "html", "optional": true},
"notes" : {"type": "string", "optional": true},
"activity_title" : {"type": "string", "optional": true},
"activity_listed" : {"type": "boolean", "optional": true},
"activity": {"type": "string", "format": "text", "optional": true},
"manual_progress" : {"type": "boolean", "optional": true},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
REQUIRED_MODULES = [
'inputex-string', 'gcb-rte', 'inputex-select', 'inputex-textarea',
'inputex-uneditable', 'inputex-checkbox']
EXTRA_JS_FILES = ['lesson_editor_lib.js', 'lesson_editor.js']
@classmethod
def get_schema_annotations_dict(cls, units):
unit_list = []
for unit in units:
if unit.type == 'U':
unit_list.append({
'label': cgi.escape(utils.display_unit_title(unit)),
'value': unit.unit_id})
return [
(['title'], 'Lesson'),
(['properties', 'key', '_inputex'], {
'label': 'ID', '_type': 'uneditable',
'className': 'inputEx-Field keyHolder'}),
(['properties', 'title', '_inputex'], {'label': 'Title'}),
(['properties', 'unit_id', '_inputex'], {
'label': 'Parent Unit', '_type': 'select',
'choices': unit_list}),
(['properties', 'scored', '_inputex'], {
'_type': 'select',
'choices': [
{'label': 'Questions are scored', 'value': 'scored'},
{
'label': 'Questions only give feedback',
'value': 'not_scored'}],
'label': 'Scored',
'description': messages.LESSON_SCORED_DESCRIPTION}),
# TODO(sll): The internal 'objectives' property should also be
# renamed.
(['properties', 'objectives', '_inputex'], {
'label': 'Lesson Body',
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'description': messages.LESSON_OBJECTIVES_DESCRIPTION}),
(['properties', 'video', '_inputex'], {
'label': 'Video ID',
'description': messages.LESSON_VIDEO_ID_DESCRIPTION}),
(['properties', 'notes', '_inputex'], {
'label': 'Notes',
'description': messages.LESSON_NOTES_DESCRIPTION}),
(['properties', 'activity_title', '_inputex'], {
'label': 'Activity Title',
'description': messages.LESSON_ACTIVITY_TITLE_DESCRIPTION}),
(['properties', 'activity_listed', '_inputex'], {
'label': 'Activity Listed',
'description': messages.LESSON_ACTIVITY_LISTED_DESCRIPTION}),
(['properties', 'activity', '_inputex'], {
'label': 'Activity',
'description': str(messages.LESSON_ACTIVITY_DESCRIPTION),
'className': 'inputEx-Field activityHolder'}),
(['properties', 'manual_progress', '_inputex'], {
'label': 'Manual Progress', '_type': 'boolean',
'description': str(
messages.LESSON_MANUAL_PROGRESS_DESCRIPTION)}),
STATUS_ANNOTATION]
def get(self):
"""Handles GET REST verb and returns lesson object as JSON payload."""
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(self, 401, 'Access denied.', {})
return
key = self.request.get('key')
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
assert lesson
fs = self.app_context.fs
path = fs.impl.physical_to_logical(course.get_activity_filename(
lesson.unit_id, lesson.lesson_id))
if lesson.has_activity and fs.isfile(path):
activity = fs.get(path)
else:
activity = ''
payload_dict = {
'key': key,
'title': lesson.title,
'unit_id': lesson.unit_id,
'scored': 'scored' if lesson.scored else 'not_scored',
'objectives': lesson.objectives,
'video': lesson.video,
'notes': lesson.notes,
'activity_title': lesson.activity_title,
'activity_listed': lesson.activity_listed,
'activity': activity,
'manual_progress': lesson.manual_progress or False,
'is_draft': not lesson.now_available
}
message = ['Success.']
if self.request.get('is_newly_created'):
message.append('New lesson has been created and saved.')
transforms.send_json_response(
self, 200, '\n'.join(message),
payload_dict=payload_dict,
xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))
def put(self):
"""Handles PUT REST verb to save lesson and associated activity."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'lesson-edit', {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
if not lesson:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
payload = request.get('payload')
updates_dict = transforms.json_to_dict(
transforms.loads(payload), self.SCHEMA_DICT)
lesson.title = updates_dict['title']
lesson.unit_id = updates_dict['unit_id']
lesson.scored = (updates_dict['scored'] == 'scored')
lesson.objectives = updates_dict['objectives']
lesson.video = updates_dict['video']
lesson.notes = updates_dict['notes']
lesson.activity_title = updates_dict['activity_title']
lesson.activity_listed = updates_dict['activity_listed']
lesson.manual_progress = updates_dict['manual_progress']
lesson.now_available = not updates_dict['is_draft']
activity = updates_dict.get('activity', '').strip()
errors = []
if activity:
lesson.has_activity = True
course.set_activity_content(lesson, activity, errors=errors)
else:
lesson.has_activity = False
fs = self.app_context.fs
path = fs.impl.physical_to_logical(course.get_activity_filename(
lesson.unit_id, lesson.lesson_id))
if fs.isfile(path):
fs.delete(path)
if not errors:
assert course.update_lesson(lesson)
course.save()
transforms.send_json_response(self, 200, 'Saved.')
else:
transforms.send_json_response(self, 412, '\n'.join(errors))
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-lesson', {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
course = courses.Course(self)
lesson = course.find_lesson_by_id(None, key)
if not lesson:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
assert course.delete_lesson(lesson)
course.save()
transforms.send_json_response(self, 200, 'Deleted.')
def generate_instanceid():
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
length = 12
return ''.join([random.choice(chars) for unused_i in xrange(length)])
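# Illustrative example: generate_instanceid returns a random 12-character
# alphanumeric string, e.g. something like 'aB3kZpQ901xY' (hypothetical), used
# as the instanceid attribute on the <question> tags emitted below.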
class CollisionError(Exception):
"""Exception raised to show that a collision in a namespace has occurred."""
class ImportActivityRESTHandler(BaseRESTHandler):
"""REST handler for requests to import an activity into the lesson body."""
URI = '/rest/course/lesson/activity'
VERSION = '1.5'
def put(self):
"""Handle REST PUT instruction to import an assignment."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(request, 'lesson-edit', {}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
text = request.get('text')
try:
content, noverify_text = verify.convert_javascript_to_python(
text, 'activity')
activity = verify.evaluate_python_expression_from_text(
content, 'activity', verify.Activity().scope, noverify_text)
except Exception: # pylint: disable-msg=broad-except
transforms.send_json_response(
self, 412, 'Unable to parse activity.')
return
try:
verify.Verifier().verify_activity_instance(activity, 'none')
except verify.SchemaException:
transforms.send_json_response(
self, 412, 'Unable to validate activity.')
return
self.course = courses.Course(self)
self.lesson = self.course.find_lesson_by_id(None, key)
self.unit = self.course.find_unit_by_id(self.lesson.unit_id)
self.question_number = 0
self.question_descriptions = set(
[q.description for q in m_models.QuestionDAO.get_all()])
self.question_group_descriptions = set(
[qg.description for qg in m_models.QuestionGroupDAO.get_all()])
lesson_content = []
try:
for item in activity['activity']:
if isinstance(item, basestring):
lesson_content.append(item)
else:
question_tag = self.import_question(item)
lesson_content.append(question_tag)
self.question_number += 1
except CollisionError:
transforms.send_json_response(
self, 412, (
'This activity has already been imported. Remove duplicate '
'imported questions from the question bank in order to '
're-import.'))
return
except Exception as ex:
transforms.send_json_response(
self, 412, 'Unable to convert: %s' % ex)
return
transforms.send_json_response(self, 200, 'OK.', payload_dict={
'content': '\n'.join(lesson_content)
})
def _get_question_description(self):
return (
'Imported from unit "%s", lesson "%s" (question #%s)' % (
self.unit.title, self.lesson.title, self.question_number + 1))
def _insert_question(self, question_dict, question_type):
question = m_models.QuestionDTO(None, question_dict)
question.type = question_type
return m_models.QuestionDAO.save(question)
def _insert_question_group(self, question_group_dict):
question_group = m_models.QuestionGroupDTO(None, question_group_dict)
return m_models.QuestionGroupDAO.save(question_group)
def import_question(self, item):
question_type = item['questionType']
if question_type == 'multiple choice':
question_dict = self.import_multiple_choice(item)
quid = self._insert_question(
question_dict, m_models.QuestionDTO.MULTIPLE_CHOICE)
return '<question quid="%s" instanceid="%s"></question>' % (
quid, generate_instanceid())
elif question_type == 'multiple choice group':
question_group_dict = self.import_multiple_choice_group(item)
qgid = self._insert_question_group(question_group_dict)
return (
'<question-group qgid="%s" instanceid="%s">'
'</question-group>') % (
qgid, generate_instanceid())
elif question_type == 'freetext':
question_dict = self.import_freetext(item)
quid = self._insert_question(
question_dict, m_models.QuestionDTO.SHORT_ANSWER)
return '<question quid="%s" instanceid="%s"></question>' % (
quid, generate_instanceid())
else:
raise ValueError('Unknown question type: %s' % question_type)
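    # Illustrative note: the strings returned by import_question are question
    # tags embedded into the lesson body, e.g.
    # '<question quid="123" instanceid="AbCdEf123456"></question>', where the
    # quid and instanceid shown are hypothetical.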
def import_multiple_choice(self, orig_question):
description = self._get_question_description()
if description in self.question_descriptions:
raise CollisionError()
return {
'version': self.VERSION,
'description': description,
'question': '',
'multiple_selections': False,
'choices': [
{
'text': choice[0],
'score': 1.0 if choice[1].value else 0.0,
'feedback': choice[2]
} for choice in orig_question['choices']]}
def import_multiple_choice_group(self, mc_choice_group):
"""Import a 'multiple choice group' as a question group."""
description = self._get_question_description()
if description in self.question_group_descriptions:
raise CollisionError()
question_group_dict = {
'version': self.VERSION,
'description': description}
question_list = []
for index, question in enumerate(mc_choice_group['questionsList']):
question_dict = self.import_multiple_choice_group_question(
question, index)
question = m_models.QuestionDTO(None, question_dict)
question.type = m_models.QuestionDTO.MULTIPLE_CHOICE
question_list.append(question)
quid_list = m_models.QuestionDAO.save_all(question_list)
question_group_dict['items'] = [{
'question': str(quid),
'weight': 1.0} for quid in quid_list]
return question_group_dict
def import_multiple_choice_group_question(self, orig_question, index):
"""Import the questions from a group as individual questions."""
# TODO(jorr): Handle allCorrectOutput and someCorrectOutput
description = (
'Imported from unit "%s", lesson "%s" (question #%s, part #%s)' % (
self.unit.title, self.lesson.title, self.question_number + 1,
index + 1))
if description in self.question_descriptions:
raise CollisionError()
correct_index = orig_question['correctIndex']
multiple_selections = not isinstance(correct_index, int)
if multiple_selections:
partial = 1.0 / len(correct_index)
choices = [{
'text': text,
'score': partial if i in correct_index else -1.0
} for i, text in enumerate(orig_question['choices'])]
else:
choices = [{
'text': text,
'score': 1.0 if i == correct_index else 0.0
} for i, text in enumerate(orig_question['choices'])]
return {
'version': self.VERSION,
'description': description,
'question': orig_question.get('questionHTML') or '',
'multiple_selections': multiple_selections,
'choices': choices}
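    # Worked example of the scoring above (values are hypothetical): for a
    # group question with choices ['a', 'b', 'c'] and correctIndex [0, 2],
    # multiple_selections is True, partial is 1.0 / 2 = 0.5, and the scores
    # become [0.5, -1.0, 0.5]; a single correctIndex of 1 instead yields
    # [0.0, 1.0, 0.0] with multiple_selections False.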
def import_freetext(self, orig_question):
description = self._get_question_description()
if description in self.question_descriptions:
raise CollisionError()
return {
'version': self.VERSION,
'description': description,
'question': '',
'hint': orig_question['showAnswerOutput'],
'graders': [{
'score': 1.0,
'matcher': 'regex',
'response': orig_question['correctAnswerRegex'].value,
'feedback': orig_question.get('correctAnswerOutput')
}],
'defaultFeedback': orig_question.get('incorrectAnswerOutput')}
class ExportAssessmentRESTHandler(BaseRESTHandler):
"""REST handler for requests to export an activity into new format."""
URI = '/rest/course/asessment/export'
VERSION = '1.5'
# pylint: disable-msg=too-many-statements
def put(self):
"""Handle the PUT verb to export an assessment."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
if not self.assert_xsrf_token_or_fail(
request, 'put-unit', {'key': key}):
return
raw_assessment_dict = transforms.json_to_dict(
request.get('payload'), AssessmentRESTHandler.SCHEMA_DICT)
entity_dict = {}
AssessmentRESTHandler.REG.convert_json_to_entity(
raw_assessment_dict, entity_dict)
course = courses.Course(self)
self.unit = course.find_unit_by_id(key)
self.question_descriptions = set(
[q.description for q in m_models.QuestionDAO.get_all()])
# Import all the assessment context except the questions
new_unit = course.add_assessment()
errors = []
new_unit.title = 'Exported from %s ' % entity_dict.get('title')
try:
new_unit.weight = int(entity_dict.get('weight'))
if new_unit.weight < 0:
errors.append('The weight must be a non-negative integer.')
except ValueError:
errors.append('The weight must be an integer.')
new_unit.now_available = not entity_dict.get('is_draft')
workflow_dict = entity_dict.get('workflow')
if len(ALLOWED_MATCHERS_NAMES) == 1:
workflow_dict[courses.MATCHER_KEY] = (
ALLOWED_MATCHERS_NAMES.keys()[0])
new_unit.workflow_yaml = yaml.safe_dump(workflow_dict)
new_unit.workflow.validate(errors=errors)
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
assessment_dict = self.get_assessment_dict(entity_dict.get('content'))
if assessment_dict is None:
return
if assessment_dict.get('checkAnswers'):
new_unit.html_check_answers = assessment_dict['checkAnswers'].value
# Import the questions in the assessment and the review questionnaire
html_content = []
html_review_form = []
if assessment_dict.get('preamble'):
html_content.append(assessment_dict['preamble'])
# prepare all the DTOs for the questions in the assignment content
question_dtos = self.get_question_dtos(
assessment_dict,
'Imported from assessment "%s" (question #%s)')
if question_dtos is None:
return
# prepare the questions for the review questionnaire, if necessary
review_dtos = []
if course.needs_human_grader(new_unit):
review_str = entity_dict.get('review_form')
review_dict = self.get_assessment_dict(review_str)
if review_dict is None:
return
if review_dict.get('preamble'):
html_review_form.append(review_dict['preamble'])
review_dtos = self.get_question_dtos(
review_dict,
'Imported from assessment "%s" (review question #%s)')
if review_dtos is None:
return
# batch submit the questions and split out their resulting id's
all_dtos = question_dtos + review_dtos
all_ids = m_models.QuestionDAO.save_all(all_dtos)
question_ids = all_ids[:len(question_dtos)]
review_ids = all_ids[len(question_dtos):]
# insert question tags for the assessment content
for quid in question_ids:
html_content.append(
str(safe_dom.Element(
'question',
quid=str(quid), instanceid=generate_instanceid())))
new_unit.html_content = '\n'.join(html_content)
# insert question tags for the review questionnaire
for quid in review_ids:
html_review_form.append(
str(safe_dom.Element(
'question',
quid=str(quid), instanceid=generate_instanceid())))
new_unit.html_review_form = '\n'.join(html_review_form)
course.save()
transforms.send_json_response(
self, 200, (
'The assessment has been exported to "%s".' % new_unit.title),
payload_dict={'key': key})
def get_assessment_dict(self, assessment_content):
"""Validate the assessment scipt and return as a python dict."""
try:
content, noverify_text = verify.convert_javascript_to_python(
assessment_content, 'assessment')
assessment = verify.evaluate_python_expression_from_text(
content, 'assessment', verify.Assessment().scope, noverify_text)
except Exception: # pylint: disable-msg=broad-except
transforms.send_json_response(
self, 412, 'Unable to parse assessment.')
return None
try:
verify.Verifier().verify_assessment_instance(assessment, 'none')
except verify.SchemaException:
transforms.send_json_response(
self, 412, 'Unable to validate assessment.')
return None
return assessment['assessment']
def get_question_dtos(self, assessment_dict, description_template):
"""Convert the assessment into a list of QuestionDTO's."""
question_dtos = []
try:
for i, question in enumerate(assessment_dict['questionsList']):
description = description_template % (self.unit.title, (i + 1))
if description in self.question_descriptions:
raise CollisionError()
question_dto = self.import_question(question)
question_dto.dict['description'] = description
question_dtos.append(question_dto)
except CollisionError:
transforms.send_json_response(
self, 412, (
'This assessment has already been imported. Remove '
'duplicate imported questions from the question bank in '
'order to re-import.'))
return None
except Exception as ex:
transforms.send_json_response(
self, 412, 'Unable to convert: %s' % ex)
return None
return question_dtos
def import_question(self, question):
"""Convert a single question into a QuestioDTO."""
if 'choices' in question:
question_dict = self.import_multiple_choice_question(question)
question_type = m_models.QuestionDTO.MULTIPLE_CHOICE
elif 'correctAnswerNumeric' in question:
question_dict = self.import_short_answer_question(
question.get('questionHTML'),
'numeric',
question.get('correctAnswerNumeric'))
question_type = m_models.QuestionDTO.SHORT_ANSWER
elif 'correctAnswerString' in question:
question_dict = self.import_short_answer_question(
question.get('questionHTML'),
'case_insensitive',
question.get('correctAnswerString'))
question_type = m_models.QuestionDTO.SHORT_ANSWER
elif 'correctAnswerRegex' in question:
question_dict = self.import_short_answer_question(
question.get('questionHTML'),
'regex',
question.get('correctAnswerRegex').value)
question_type = m_models.QuestionDTO.SHORT_ANSWER
else:
raise ValueError('Unknown question type')
question_dto = m_models.QuestionDTO(None, question_dict)
question_dto.type = question_type
return question_dto
def import_multiple_choice_question(self, question):
"""Assemble the dict for a multiple choice question."""
question_dict = {
'version': self.VERSION,
'question': question.get('questionHTML') or '',
'multiple_selections': False
}
choices = []
for choice in question.get('choices'):
if isinstance(choice, basestring):
text = choice
score = 0.0
else:
text = choice.value
score = 1.0
choices.append({
'text': text,
'score': score
})
question_dict['choices'] = choices
return question_dict
def import_short_answer_question(self, question_html, matcher, response):
return {
'version': self.VERSION,
'question': question_html or '',
'graders': [{
'score': 1.0,
'matcher': matcher,
'response': response,
}]
}
"""
A reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the
CoreFoundation main loop used by Mac OS X.
This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}
applications.
"""
__all__ = [
'install',
'CFReactor'
]
import sys
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.posixbase import _NO_FILEDESC
from twisted.python import log
from CoreFoundation import (
CFRunLoopAddSource, CFRunLoopRemoveSource, CFRunLoopGetMain, CFRunLoopRun,
CFRunLoopStop, CFRunLoopTimerCreate, CFRunLoopAddTimer,
CFRunLoopTimerInvalidate, kCFAllocatorDefault, kCFRunLoopCommonModes,
CFAbsoluteTimeGetCurrent)
from CFNetwork import (
CFSocketCreateWithNative, CFSocketSetSocketFlags, CFSocketEnableCallBacks,
CFSocketCreateRunLoopSource, CFSocketDisableCallBacks, CFSocketInvalidate,
kCFSocketWriteCallBack, kCFSocketReadCallBack, kCFSocketConnectCallBack,
kCFSocketAutomaticallyReenableReadCallBack,
kCFSocketAutomaticallyReenableWriteCallBack)
_READ = 0
_WRITE = 1
_preserveSOError = 1 << 6
class _WakerPlus(_Waker):
"""
The normal Twisted waker will simply wake up the main loop, which causes an
iteration to run, which in turn causes L{PosixReactorBase.runUntilCurrent}
to get invoked.
L{CFReactor} has a slightly different model of iteration, though: rather
than have each iteration process the thread queue, then timed calls, then
file descriptors, each callback is run as it is dispatched by the CFRunLoop
observer which triggered it.
So this waker needs not only to unblock the loop, but also to make sure the
work gets done; it therefore reschedules the invocation of C{runUntilCurrent} to
be immediate (0 seconds from now) even if there is no timed call work to
do.
"""
def doRead(self):
"""
Wake up the loop and force C{runUntilCurrent} to run immediately in the
next timed iteration.
"""
result = _Waker.doRead(self)
self.reactor._scheduleSimulate(True)
return result
class CFReactor(PosixReactorBase):
"""
The CoreFoundation reactor.
You probably want to use this via the L{install} API.
@ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a
4-tuple of:
- source: a C{CFRunLoopSource}; the source associated with this
socket.
- socket: a C{CFSocket} wrapping the file descriptor.
- descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}
provider.
- read-write: a 2-C{list} of booleans: respectively, whether this
descriptor is currently registered for reading or registered for
writing.
@ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or
L{IWriteDescriptor} to a C{fd} in L{_fdmap}. Implemented in this
manner so that we don't have to rely (even more) on the hashability of
L{IReadDescriptor} providers, and we know that they won't be collected
since these are kept in sync with C{_fdmap}. Necessary because the
.fileno() of a file descriptor may change at will, so we need to be
able to look up what its file descriptor I{used} to be, so that we can
look it up in C{_fdmap}.
@ivar _cfrunloop: the L{CFRunLoop} pyobjc object wrapped by this reactor.
@ivar _inCFLoop: Is L{CFRunLoopRun} currently running?
@type _inCFLoop: C{bool}
@ivar _currentSimulator: if a CFTimer is currently scheduled with the CF
run loop to run Twisted callLater calls, this is a reference to it.
Otherwise, it is C{None}
"""
implements(IReactorFDSet)
def __init__(self, runLoop=None, runner=None):
self._fdmap = {}
self._idmap = {}
if runner is None:
runner = CFRunLoopRun
self._runner = runner
if runLoop is None:
runLoop = CFRunLoopGetMain()
self._cfrunloop = runLoop
PosixReactorBase.__init__(self)
def installWaker(self):
"""
Override C{installWaker} in order to use L{_WakerPlus}; otherwise this
should be exactly the same as the parent implementation.
"""
if not self.waker:
self.waker = _WakerPlus(self)
self._internalReaders.add(self.waker)
self.addReader(self.waker)
def _socketCallback(self, cfSocket, callbackType,
ignoredAddress, ignoredData, context):
"""
The socket callback issued by CFRunLoop. This will issue C{doRead} or
C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
registered with the file descriptor that we are being notified of.
@param cfSocket: The L{CFSocket} which has got some activity.
@param callbackType: The type of activity that we are being notified
of. Either L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}.
@param ignoredAddress: Unused, because this is not used for either of
the callback types we register for.
@param ignoredData: Unused, because this is not used for either of the
callback types we register for.
@param context: The data associated with this callback by
L{CFSocketCreateWithNative} (in L{CFReactor._watchFD}). A 2-tuple
of C{(int, CFRunLoopSource)}.
"""
(fd, smugglesrc) = context
if fd not in self._fdmap:
# Spurious notifications seem to be generated sometimes if you
# CFSocketDisableCallBacks in the middle of an event. I don't know
# about this FD, any more, so let's get rid of it.
CFRunLoopRemoveSource(
self._cfrunloop, smugglesrc, kCFRunLoopCommonModes
)
return
why = None
isRead = False
src, skt, readWriteDescriptor, rw = self._fdmap[fd]
try:
if readWriteDescriptor.fileno() == -1:
why = _NO_FILEDESC
else:
isRead = callbackType == kCFSocketReadCallBack
# CFSocket seems to deliver duplicate read/write notifications
# sometimes, especially a duplicate writability notification
# when first registering the socket. This bears further
# investigation, since I may have been mis-interpreting the
# behavior I was seeing. (Running the full Twisted test suite,
# while thorough, is not always entirely clear.) Until this has
# been more thoroughly investigated, we consult our own
# reading/writing state flags to determine whether we should
# actually attempt a doRead/doWrite first. -glyph
if isRead:
if rw[_READ]:
why = log.callWithLogger(
readWriteDescriptor, readWriteDescriptor.doRead)
else:
if rw[_WRITE]:
why = log.callWithLogger(
readWriteDescriptor, readWriteDescriptor.doWrite)
except:
why = sys.exc_info()[1]
log.err()
if why:
self._disconnectSelectable(readWriteDescriptor, why, isRead)
def _watchFD(self, fd, descr, flag):
"""
Register a file descriptor with the L{CFRunLoop}, or modify its state
so that it's listening for both notifications (read and write) rather
than just one; used to implement C{addReader} and C{addWriter}.
@param fd: The file descriptor.
@type fd: C{int}
@param descr: the L{IReadDescriptor} or L{IWriteDescriptor}
@param flag: the flag to register for callbacks on, either
L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}
"""
if fd == -1:
raise RuntimeError("Invalid file descriptor.")
if fd in self._fdmap:
src, cfs, gotdescr, rw = self._fdmap[fd]
# do I need to verify that it's the same descr?
else:
ctx = []
ctx.append(fd)
cfs = CFSocketCreateWithNative(
kCFAllocatorDefault, fd,
kCFSocketReadCallBack | kCFSocketWriteCallBack |
kCFSocketConnectCallBack,
self._socketCallback, ctx
)
CFSocketSetSocketFlags(
cfs,
kCFSocketAutomaticallyReenableReadCallBack |
kCFSocketAutomaticallyReenableWriteCallBack |
# This extra flag is to ensure that CF doesn't (destructively,
# because destructively is the only way to do it) retrieve
# SO_ERROR and thereby break twisted.internet.tcp.BaseClient,
# which needs SO_ERROR to tell it whether or not it needs to
# call connect_ex a second time.
_preserveSOError
)
src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)
ctx.append(src)
CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketDisableCallBacks(
cfs,
kCFSocketReadCallBack | kCFSocketWriteCallBack |
kCFSocketConnectCallBack
)
rw = [False, False]
self._idmap[id(descr)] = fd
self._fdmap[fd] = src, cfs, descr, rw
rw[self._flag2idx(flag)] = True
CFSocketEnableCallBacks(cfs, flag)
def _flag2idx(self, flag):
"""
Convert a C{kCFSocket...} constant to an index into the read/write
state list (C{_READ} or C{_WRITE}) (the 4th element of the value of
C{self._fdmap}).
@param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}
@return: C{_READ} or C{_WRITE}
"""
return {kCFSocketReadCallBack: _READ,
kCFSocketWriteCallBack: _WRITE}[flag]
def _unwatchFD(self, fd, descr, flag):
"""
Unregister a file descriptor with the L{CFRunLoop}, or modify its state
so that it's listening for only one notification (read or write) as
opposed to both; used to implement C{removeReader} and C{removeWriter}.
@param fd: a file descriptor
@type fd: C{int}
@param descr: an L{IReadDescriptor} or L{IWriteDescriptor}
        @param flag: L{kCFSocketWriteCallBack} or L{kCFSocketReadCallBack}
"""
if id(descr) not in self._idmap:
return
if fd == -1:
# need to deal with it in this case, I think.
realfd = self._idmap[id(descr)]
else:
realfd = fd
src, cfs, descr, rw = self._fdmap[realfd]
CFSocketDisableCallBacks(cfs, flag)
rw[self._flag2idx(flag)] = False
if not rw[_READ] and not rw[_WRITE]:
del self._idmap[id(descr)]
del self._fdmap[realfd]
CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketInvalidate(cfs)
def addReader(self, reader):
"""
Implement L{IReactorFDSet.addReader}.
"""
self._watchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def addWriter(self, writer):
"""
Implement L{IReactorFDSet.addWriter}.
"""
self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeReader(self, reader):
"""
Implement L{IReactorFDSet.removeReader}.
"""
self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def removeWriter(self, writer):
"""
Implement L{IReactorFDSet.removeWriter}.
"""
self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeAll(self):
"""
Implement L{IReactorFDSet.removeAll}.
"""
allDesc = set([descr for src, cfs, descr, rw in self._fdmap.values()])
allDesc -= set(self._internalReaders)
for desc in allDesc:
self.removeReader(desc)
self.removeWriter(desc)
return list(allDesc)
def getReaders(self):
"""
Implement L{IReactorFDSet.getReaders}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values()
if rw[_READ]]
def getWriters(self):
"""
Implement L{IReactorFDSet.getWriters}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values()
if rw[_WRITE]]
def _moveCallLaterSooner(self, tple):
"""
Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}
so that it will immediately reschedule. Normally
C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is
always run before the mainloop goes back to sleep, so this forces it to
immediately recompute how long the loop needs to stay asleep.
"""
result = PosixReactorBase._moveCallLaterSooner(self, tple)
self._scheduleSimulate()
return result
_inCFLoop = False
def mainLoop(self):
"""
Run the runner (L{CFRunLoopRun} or something that calls it), which runs
the run loop until C{crash()} is called.
"""
self._inCFLoop = True
try:
self._runner()
finally:
self._inCFLoop = False
_currentSimulator = None
def _scheduleSimulate(self, force=False):
"""
Schedule a call to C{self.runUntilCurrent}. This will cancel the
currently scheduled call if it is already scheduled.
@param force: Even if there are no timed calls, make sure that
C{runUntilCurrent} runs immediately (in a 0-seconds-from-now
            L{CFRunLoopTimer}). This is necessary for calls which need to
trigger behavior of C{runUntilCurrent} other than running timed
calls, such as draining the thread call queue or calling C{crash()}
when the appropriate flags are set.
@type force: C{bool}
"""
if self._currentSimulator is not None:
CFRunLoopTimerInvalidate(self._currentSimulator)
self._currentSimulator = None
timeout = self.timeout()
if force:
timeout = 0.0
if timeout is not None:
fireDate = (CFAbsoluteTimeGetCurrent() + timeout)
def simulate(cftimer, extra):
self._currentSimulator = None
self.runUntilCurrent()
self._scheduleSimulate()
c = self._currentSimulator = CFRunLoopTimerCreate(
kCFAllocatorDefault, fireDate,
0, 0, 0, simulate, None
)
CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)
def callLater(self, _seconds, _f, *args, **kw):
"""
Implement L{IReactorTime.callLater}.
"""
delayedCall = PosixReactorBase.callLater(
self, _seconds, _f, *args, **kw
)
self._scheduleSimulate()
return delayedCall
def stop(self):
"""
Implement L{IReactorCore.stop}.
"""
PosixReactorBase.stop(self)
self._scheduleSimulate(True)
def crash(self):
"""
Implement L{IReactorCore.crash}
"""
wasStarted = self._started
PosixReactorBase.crash(self)
if self._inCFLoop:
self._stopNow()
else:
if wasStarted:
self.callLater(0, self._stopNow)
def _stopNow(self):
"""
Immediately stop the CFRunLoop (which must be running!).
"""
CFRunLoopStop(self._cfrunloop)
def iterate(self, delay=0):
"""
Emulate the behavior of C{iterate()} for things that want to call it,
by letting the loop run for a little while and then scheduling a timed
call to exit it.
"""
self.callLater(delay, self._stopNow)
self.mainLoop()
def install(runLoop=None, runner=None):
"""
Configure the twisted mainloop to be run inside CFRunLoop.
@param runLoop: the run loop to use.
@param runner: the function to call in order to actually invoke the main
loop. This will default to L{CFRunLoopRun} if not specified. However,
this is not an appropriate choice for GUI applications, as you need to
run NSApplicationMain (or something like it). For example, to run the
Twisted mainloop in a PyObjC application, your C{main.py} should look
something like this::
from PyObjCTools import AppHelper
from twisted.internet.cfreactor import install
install(runner=AppHelper.runEventLoop)
# initialize your application
reactor.run()
@return: The installed reactor.
@rtype: L{CFReactor}
"""
reactor = CFReactor(runLoop=runLoop, runner=runner)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
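# A minimal headless usage sketch (illustration only, not part of the original
# module): install the reactor before anything imports twisted.internet.reactor,
# then drive it as usual.
#
#   from twisted.internet.cfreactor import install
#   install()                            # wraps CFRunLoopGetMain() / CFRunLoopRun
#   from twisted.internet import reactor
#   reactor.callLater(0, reactor.stop)   # schedule an immediate, clean shutdown
#   reactor.run()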
| {
"content_hash": "1f152da6fb5f85932bbfacc609d091c2",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 79,
"avg_line_length": 35.74096385542169,
"alnum_prop": 0.5991909657845946,
"repo_name": "timkrentz/SunTracker",
"id": "490e6e70bf4644edc2f6ccafa2798f9991f3817d",
"size": "17932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/internet/cfreactor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
"""
COCO Keypoints 2016 process functions.
"""
from __future__ import print_function, division
import os
from collections import OrderedDict
import numpy as np
import progressbar
from dbcollection.datasets import BaseTask
from dbcollection.utils.string_ascii import convert_str_to_ascii as str2ascii
from dbcollection.utils.pad import pad_list, squeeze_list
from dbcollection.utils.file_load import load_json
from dbcollection.utils.hdf5 import hdf5_write_data
from .load_data_test import load_data_test
class Keypoints2016(BaseTask):
"""COCO Keypoints (2016) preprocessing functions."""
# metadata filename
filename_h5 = 'keypoint_2016'
image_dir_path = {
"train": 'train2014',
"val": 'val2014',
"test": 'test2015',
"test-dev": 'test2015'
}
annotation_path = {
"train": os.path.join('annotations', 'person_keypoints_train2014.json'),
"val": os.path.join('annotations', 'person_keypoints_val2014.json'),
"test": os.path.join('annotations', 'image_info_test2015.json'),
"test-dev": os.path.join('annotations', 'image_info_test-dev2015.json')
}
    keypoints_list = [
'nose', # -- 1
'left_eye', # -- 2
'right_eye', # -- 3
'left_ear', # -- 4
'right_ear', # -- 5
'left_shoulder', # -- 6
'right_shoulder', # -- 7
'left_elbow', # -- 8
'right_elbow', # -- 9
'left_wrist', # -- 10
'right_wrist', # -- 11
'left_hip', # -- 12
'right_hip', # -- 13
'left_knee', # -- 14
'right_knee', # -- 15
'left_ankle', # -- 16
'right_ankle' # -- 17
    ]
def parse_image_annotations(self, image_dir, annotations):
"""
Parse image annotations data to a dictionary and lists
"""
filename_ids = {}
for i, annot in enumerate(annotations['images']):
filename_ids[annot['file_name']] = i
# order image data by file name
images_annot_by_fname = {}
for i, annot in enumerate(annotations['images']):
images_annot_by_fname[annot['file_name']] = {
"file_name": os.path.join(image_dir, annot['file_name']),
"width": annot['width'],
"height": annot['height'],
"id": annot['id'],
"coco_url": annot['coco_url'],
}
# order image data by file id
images_fname_by_id = {}
for i, annot in enumerate(annotations['images']):
images_fname_by_id[annot['id']] = annot['file_name']
return filename_ids, images_annot_by_fname, images_fname_by_id
def parse_category_annotations(self, annotations):
"""
Parse category annotations data to a dictionary and lists
"""
categories = {}
category_list, supercategory_list, category_id = [], [], []
for i, annot in enumerate(annotations['categories']):
categories[annot['id']] = {
"name": annot['name'],
"supercategory": annot['supercategory'],
"id": annot['id']
}
category_id.append(annot['id'])
category_list.append(annot['name'])
supercategory_list.append(annot['supercategory'])
supercategory_list = list(set(supercategory_list))
return categories, category_list, supercategory_list, category_id
def load_data_trainval(self, set_name, image_dir, annotation_path):
"""
Load train+val data
"""
data = {}
# load annotations file
if self.verbose:
print(' > Loading annotation file: ' + annotation_path)
annotations = load_json(annotation_path)
# progressbar
if self.verbose:
prgbar = progressbar.ProgressBar(max_value=len(annotations['annotations']))
# parse annotations
# images
if self.verbose:
print(' > Processing image annotations... ')
# get all image filenames + ids into a list
filename_ids, images_annot_by_fname, images_fname_by_id = self.parse_image_annotations(
image_dir, annotations)
if self.verbose:
print(' > Processing category annotations... ')
parsed_annots = self.parse_category_annotations(annotations)
categories, category_list, supercategory_list, category_id = parsed_annots
skeleton = annotations['categories'][0]['skeleton']
keypoints = annotations['categories'][0]['keypoints']
if self.verbose:
print(' > Processing data annotations... ')
# group annotations by file name
annotation_id_dict = {}
for i, annot in enumerate(annotations['annotations']):
filename = images_fname_by_id[annot['image_id']]
category_annot = categories[annot['category_id']]
obj_id = annot["id"]
annotation_id_dict[obj_id] = i
if isinstance(annot["segmentation"], list):
segmentation = squeeze_list(annot["segmentation"], -1) # squeeze list
elif isinstance(annot["segmentation"]['counts'], list):
segmentation = annot["segmentation"]["counts"]
else:
segmentation = annot["segmentation"]
# convert from [x,y,w,h] to [xmin,ymin,xmax,ymax]
bbox = [annot['bbox'][0], # xmin
annot['bbox'][1], # ymin
                    annot['bbox'][0] + annot['bbox'][2] - 1, # xmax
annot['bbox'][1] + annot['bbox'][3] - 1] # ymax
obj = {
"category": category_annot['name'],
"supercategory": category_annot['supercategory'],
"area": annot['area'],
"iscrowd": annot['iscrowd'],
"segmentation": segmentation,
"bbox": bbox,
"num_keypoints": annot['num_keypoints'],
"keypoints": annot['keypoints'],
"image_id": annot['image_id'],
"category_id": annot['category_id'],
"id": annot["id"],
"annotation_id": i
}
# add annotations to the image data
try:
images_annot_by_fname[filename]["object"].update({obj_id: obj})
except KeyError:
images_annot_by_fname[filename]["object"] = {obj_id: obj}
# update progressbar
if self.verbose:
prgbar.update(i)
# reset progressbar
if self.verbose:
prgbar.finish()
return {set_name: [OrderedDict(sorted(images_annot_by_fname.items())),
annotations,
annotation_id_dict,
category_list,
supercategory_list,
category_id,
filename_ids,
images_fname_by_id,
skeleton,
keypoints]}
def load_data(self):
"""
Load data of the dataset (create a generator).
"""
for set_name in self.image_dir_path:
if self.verbose:
print('\n> Loading data files for the set: ' + set_name)
# image dir
image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])
# annotation file path
annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])
if 'test' in set_name:
yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)
else:
yield self.load_data_trainval(set_name, image_dir, annot_filepath)
def process_set_metadata(self, data, set_name):
"""
Saves the metadata of a set.
"""
hdf5_handler = self.hdf5_manager.get_group(set_name)
image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])
if 'test' in set_name:
is_test = True
data_ = data[0]
filename_ids = data[1]
annotations = data[2]
category = data[3]
supercategory = data[4]
category_id = data[5]
else:
is_test = False
data_ = data[0]
annotations = data[1]
annotation_id_dict = data[2]
category = data[3]
supercategory = data[4]
category_id = data[5]
filename_ids = data[6]
images_fname_by_id = data[7]
skeleton = data[8]
keypoints = data[9]
keypoints_ = str2ascii(keypoints)
skeleton_ = np.array(pad_list(skeleton, -1), dtype=np.uint8)
category_ = str2ascii(category)
supercategory_ = str2ascii(supercategory)
image_filenames = []
coco_urls = []
width = []
height = []
image_id = []
annotation_id = []
area = []
iscrowd = [0, 1]
segmentation = []
num_keypoints = list(range(0, 17 + 1))
keypoints_list = []
bbox = []
object_id = []
# coco id lists
        # These are ordered by entry like in the annotation files.
# I.e., coco_images_ids[0] has the object_id with the file_name, id, height, etc.
# as coco_annotation_file[set_name]["images"][0]
coco_images_ids = []
coco_categories_ids = []
coco_annotations_ids = []
if is_test:
object_fields = ["image_filenames", "coco_urls", "width", "height"]
else:
object_fields = ["image_filenames", "coco_urls", "width", "height",
"category", "supercategory", "boxes", "area",
"iscrowd", "segmentation",
"image_id", "category_id", "annotation_id",
"num_keypoints", "keypoints"]
list_boxes_per_image = []
list_keypoints_per_image = []
list_object_ids_per_image = []
list_image_filenames_per_num_keypoints = []
list_object_ids_per_keypoint = [] # body part
if self.verbose:
print('> Adding data to default group:')
prgbar = progressbar.ProgressBar(max_value=len(data_))
counter = 0
tmp_coco_annotations_ids = {}
for i, key in enumerate(data_):
annotation = data_[key]
image_filenames.append(annotation["file_name"])
width.append(annotation["width"])
height.append(annotation["height"])
coco_urls.append(annotation["coco_url"])
image_id.append(annotation["id"])
if is_test:
# *** object_id ***
# [filename, coco_url, width, height]
object_id.append([i, i, i, i])
list_object_ids_per_image.append([i])
else:
boxes_per_image = []
if "object" in annotation:
for j, obj_idx in enumerate(annotation["object"]):
obj = annotation["object"][obj_idx]
area.append(obj["area"])
bbox.append(obj["bbox"])
annotation_id.append(obj["id"])
segmentation.append(obj["segmentation"])
keypoints_list.append(obj["keypoints"])
# *** object_id ***
# [filename, coco_url, width, height,
# category, supercategory,
# bbox, area, iscrowd, segmentation,
# "image_id", "category_id", "annotation_id"
# "num_keypoints", "keypoints"]
object_id.append([i, i, i, i,
category.index(obj["category"]), supercategory.index(
obj["supercategory"]),
counter, counter, obj["iscrowd"], counter,
i, category.index(obj["category"]), counter,
obj["num_keypoints"], counter])
boxes_per_image.append(counter)
# temporary var
tmp_coco_annotations_ids[obj["id"]] = counter
# update counter
counter += 1
list_boxes_per_image.append(boxes_per_image)
list_keypoints_per_image.append(boxes_per_image)
list_object_ids_per_image.append(boxes_per_image)
# update progressbar
if self.verbose:
prgbar.update(i)
# update progressbar
if self.verbose:
prgbar.finish()
if self.verbose:
print('> Processing coco lists:')
prgbar = progressbar.ProgressBar(max_value=len(annotations['images']))
# set coco id lists
for i, annot in enumerate(annotations['images']):
fname_id = image_filenames.index(os.path.join(image_dir, annot['file_name']))
coco_images_ids.append(fname_id)
# update progressbar
if self.verbose:
prgbar.update(i)
# update progressbar
if self.verbose:
prgbar.finish()
coco_categories_ids = list(range(len(category)))
if not is_test:
if self.verbose:
prgbar = progressbar.ProgressBar(max_value=len(annotations['annotations']))
for i, annot in enumerate(annotations['annotations']):
annot_id = tmp_coco_annotations_ids[annot['id']]
coco_annotations_ids.append(annot_id)
# update progressbar
if self.verbose:
prgbar.update(i)
# update progressbar
if self.verbose:
prgbar.finish()
# process lists
if not is_test:
if self.verbose:
print('> Processing lists...')
for i in range(len(keypoints)):
                imgs_per_num = [val[0] for _, val in enumerate(object_id) if val[13] == i]  # 13 = num_keypoints
imgs_per_num = list(set(imgs_per_num)) # get unique values
imgs_per_num.sort()
list_image_filenames_per_num_keypoints.append(imgs_per_num)
for i in range(len(keypoints)):
objs_per_keypoint = [j for j, val in enumerate(
keypoints_list) if val[i * 3] > 0 or val[i * 3 + 1] > 0]
objs_per_keypoint = list(set(objs_per_keypoint)) # get unique values
objs_per_keypoint.sort()
list_object_ids_per_keypoint.append(objs_per_keypoint)
hdf5_write_data(hdf5_handler, 'image_filenames',
str2ascii(image_filenames), dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'coco_urls',
str2ascii(coco_urls), dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'width',
np.array(width, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'height',
np.array(height, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'category',
category_, dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'supercategory',
supercategory_, dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'image_id',
np.array(image_id, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'category_id',
np.array(category_id, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'object_ids',
np.array(object_id, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'object_fields',
str2ascii(object_fields), dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'coco_images_ids',
np.array(coco_images_ids, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'coco_categories_ids',
np.array(coco_categories_ids, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'list_object_ids_per_image',
np.array(pad_list(list_object_ids_per_image, -1), dtype=np.int32),
fillvalue=-1)
if not is_test:
hdf5_write_data(hdf5_handler, 'annotation_id',
np.array(annotation_id, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'keypoint_names',
keypoints_, dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'skeleton',
skeleton_, dtype=np.uint8,
fillvalue=0)
hdf5_write_data(hdf5_handler, 'boxes',
np.array(bbox, dtype=np.float),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'iscrowd',
np.array(iscrowd, dtype=np.uint8),
fillvalue=-1)
nrows = len(segmentation)
ncols = max([len(l) for l in segmentation])
dset = hdf5_handler.create_dataset('segmentation',
(nrows, ncols),
dtype=np.float,
chunks=True,
compression="gzip",
compression_opts=4,
fillvalue=-1)
if self.verbose:
print(' -- Saving segmentation masks to disk (this will take some time)')
prgbar = progressbar.ProgressBar(max_value=nrows)
for i in range(nrows):
dset[i, :len(segmentation[i])] = np.array(segmentation[i], dtype=np.float)
if self.verbose:
prgbar.update(i)
if self.verbose:
prgbar.finish()
hdf5_write_data(hdf5_handler, 'area',
np.array(area, dtype=np.int32),
fillvalue=-1)
hdf5_write_data(hdf5_handler, 'num_keypoints',
np.array(num_keypoints, dtype=np.uint8),
fillvalue=0)
hdf5_write_data(hdf5_handler, 'keypoints',
np.array(keypoints_list, dtype=np.int32),
fillvalue=0)
hdf5_write_data(hdf5_handler, 'coco_annotations_ids',
np.array(coco_annotations_ids, dtype=np.int32),
fillvalue=-1)
pad_value = -1
hdf5_write_data(hdf5_handler, 'list_boxes_per_image',
np.array(pad_list(list_boxes_per_image, pad_value), dtype=np.int32),
fillvalue=pad_value)
hdf5_write_data(hdf5_handler, 'list_keypoints_per_image',
np.array(pad_list(list_keypoints_per_image, pad_value), dtype=np.int32),
fillvalue=pad_value)
hdf5_write_data(hdf5_handler, 'list_image_filenames_per_num_keypoints',
np.array(pad_list(list_image_filenames_per_num_keypoints,
pad_value), dtype=np.int32),
fillvalue=pad_value)
hdf5_write_data(hdf5_handler, 'list_object_ids_per_keypoint',
np.array(pad_list(list_object_ids_per_keypoint,
pad_value), dtype=np.int32),
fillvalue=pad_value)
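# A hedged sketch of reading back the padded list fields written above (assumes h5py
# is installed, that the task has produced its HDF5 metadata file, and that the
# file/group paths below are illustrative rather than exact):
#
#   import h5py
#   with h5py.File('coco_keypoint_2016.h5', 'r') as f:
#       boxes_per_image = f['train/list_boxes_per_image'][()]
#       first_image_boxes = boxes_per_image[0][boxes_per_image[0] != -1]  # drop -1 pad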
| {
"content_hash": "ee97e7c075f9133bb208311a8a456cca",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 100,
"avg_line_length": 39.87743190661479,
"alnum_prop": 0.4968044104015222,
"repo_name": "farrajota/dbcollection",
"id": "4a48ebe82db260adcb7e8820fe798a1ef97a6fb0",
"size": "20497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dbcollection/datasets/coco/keypoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Jupyter Notebook",
"bytes": "21468"
},
{
"name": "Makefile",
"bytes": "1692"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "1620582"
}
],
"symlink_target": ""
} |
from org.myrobotlab.image import Util
# start a opencv service
opencv = Runtime.start("opencv","OpenCV")
#gui.setActiveTab("opencv")
# add python as a listener to OpenCV data
# this tells the framework - whenever opencv.publishOpenCVData is invoked
# python.onOpenCVData will get called
python = Runtime.start("python","Python")
python.subscribe("opencv", "publishOpenCVData")
# call back - all data from opencv will come back to
# this method
def onOpenCVData(data):
# check for a bounding box
if data.getBoundingBoxArray() != None:
for box in data.getBoundingBoxArray():
print("bounding box", box.x, box.y, box.width, box.height)
# to capture from an image on the file system
# opencv.captureFromImageFile("C:\Users\grperry\Desktop\mars.jpg")
# not needed for real use - this block only sets up the virtual test environment
if ('virtual' in globals() and virtual):
opencv.setMinDelay(500)
opencv.setFrameGrabberType("org.bytedeco.javacv.FFmpegFrameGrabber")
opencv.setInputSource("file")
opencv.setInputFileName(Util.getRessourceDir()+"OpenCV/testData/monkeyFace.mp4")
opencv.capture()
#### LKOpticalTrack ####################
# experiment with Lucas Kanade optical flow/tracking
# adds the filter and one tracking point
opencv.addFilter("LKOpticalTrack")
opencv.setDisplayFilter("LKOpticalTrack")
# attempt to set a sample point in the middle
# of the video stream - you can
opencv.invokeFilterMethod("LKOpticalTrack","samplePoint", 0.5, 0.5)
sleep(4)
opencv.removeFilters()
opencv.addFilter("FaceDetect")
opencv.setDisplayFilter("FaceDetect")
# attempt to set a sample point in the middle
# of the video stream - you can
sleep(4)
opencv.removeFilters()
#### PyramidDown ####################
# scale the view down - faster since updating the screen is
# relatively slow
opencv.addFilter("PyramidDown")
opencv.setDisplayFilter("PyramidDown")
sleep(4)
# adding a second pyramid down filter - we need
# a unique name - so we'll call it PyramidDown2
opencv.addFilter("PyramidDown2","PyramidDown")
opencv.setDisplayFilter("PyramidDown2")
sleep(4)
opencv.removeFilters()
#### Canny ########################
# adding a canny filter
opencv.addFilter("Canny")
opencv.setDisplayFilter("Canny")
sleep(4)
canny = opencv.getFilter("Canny")
# changing parameters
canny.apertureSize = 3
canny.lowThreshold = 10.0
canny.highThreshold = 200.0
sleep(2)
canny.apertureSize = 5
canny.lowThreshold = 10.0
canny.highThreshold = 100.0
sleep(4)
opencv.removeFilters()
opencv.stopCapture()
| {
"content_hash": "d249d4a0a7a39d9ef1986ad4e9f8f3d5",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 82,
"avg_line_length": 27.988636363636363,
"alnum_prop": 0.7401542833942347,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "834922ae5140f6aa57b17c54841c1d84d46aa301",
"size": "2463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/OpenCV.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
} |
"""
Test SBType for ObjC classes.
"""
from __future__ import print_function
import os
import time
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ObjCSBTypeTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.line = line_number("main.m", '// Break at this line')
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test(self):
"""Test SBType for ObjC classes."""
self.build()
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation("main.m", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Get Frame #0.
self.assertTrue(process.GetState() == lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint condition")
aBar = self.frame().FindVariable("aBar")
aBarType = aBar.GetType()
self.assertTrue(aBarType.IsValid(), "Bar should be a valid data type")
self.assertTrue(
aBarType.GetName() == "Bar *",
"Bar has the right name")
self.assertTrue(
aBarType.GetNumberOfDirectBaseClasses() == 1,
"Bar has a superclass")
aFooType = aBarType.GetDirectBaseClassAtIndex(0)
self.assertTrue(aFooType.IsValid(), "Foo should be a valid data type")
self.assertTrue(aFooType.GetName() == "Foo", "Foo has the right name")
self.assertTrue(aBarType.GetNumberOfFields() == 1, "Bar has a field")
aBarField = aBarType.GetFieldAtIndex(0)
self.assertTrue(
aBarField.GetName() == "_iVar",
"The field has the right name")
| {
"content_hash": "05603acfc6d86d099320833a39341eb2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 31.93243243243243,
"alnum_prop": 0.6326703343207787,
"repo_name": "youtube/cobalt_sandbox",
"id": "fd3bfa858e1253c840259a013d2d29d1def19ed2",
"size": "2363",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/python_api/objc_type/TestObjCType.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_product_status'),
]
operations = [
migrations.AlterField(
model_name='product',
name='status',
field=models.CharField(default=b'novo', max_length=20),
preserve_default=True,
),
]
| {
"content_hash": "6c9c260614de7627655c837ff08b43f0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 22.31578947368421,
"alnum_prop": 0.5872641509433962,
"repo_name": "vitorfs/sea-serpent",
"id": "b0ea1df7fdcce0b76b091d822d37ddcc3e73bcf0",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seaserpent/core/migrations/0005_auto_20141201_2254.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "216"
},
{
"name": "JavaScript",
"bytes": "709"
},
{
"name": "Python",
"bytes": "27419"
}
],
"symlink_target": ""
} |
import math
def make(filename):
file = open(filename, "r")
linenum = 0
verts = 0
edges = 0
graph = None
for line in file:
# print ("line " + line)
values = line.split("\t")
# print ("values " + str(values))
# strip the wack n if present
try:
for i in values:
# print ("i " + i)
i = int(str(i).strip("\n"))
except Exception as ex:
print("\nError parsing the graph file. This is probably from having spaces instead of tabs.")
print("Exiting...\n")
# print(ex)
raise ex
# if first get graph verts n edges
if linenum == 0:
verts = values[0]
edges = values[1]
graph = Graph(int(verts), int(edges))
else: # else connect the verts
a = int(values[0])
b = int(values[1])
graph.connect(a, b)
linenum += 1
return graph
class GraphException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
### not used, just messing with python overloading
class Matrix:
def __init__(self, r, c):
self.rows = r
self.cols = c
self.data = [[ 0 for x in range(self.cols)] for y in range(self.rows)]
def __getitem__(self, key):
print ("key: " + str(key))
return self.data[key]
def __setitem__(self, key, value):
print ("set key: " + str(key) + " val: " + str(value))
self.data[key] = value
def output(self):
for i in range(self.rows):
row = ""
for j in range (self.cols):
row += (str(self.data[i][j]) + " ")
print ( row + "\n")
def set(self, a, b, val):
self.data[a][b] = val
def fill(self, value):
for i in range(self.rows):
for j in range(self.cols):
self.set(i,j,value)
class Graph:
def __init__(self, vs, es):
self.verts = vs
self.edges = es
self.data = [[ 0 for x in range(self.verts)] for y in range(self.verts)]
def __getitem__(self, key):
return self.data[key]
def output(self):
for i in range(self.verts):
row = ""
for j in range (self.verts):
row += (str(self.data[i][j]) + " ")
print ( row + "\n")
def connect(self, a, b):
self.data[a][b] = 1
self.data[b][a] = 1
def remove(self, a, b):
self.data[a][b] = 0
self.data[b][a] = 0
def density(self):
if ( self.edges == 0 and self.verts == 0):
return 0
else:
top = 2 * float(self.edges)
bottom = float(self.verts) * float(self.verts - 1)
return round((top/bottom), 5)
def degree(self, switch):
target = 0
if (switch == "min"):
target = self.verts - 1
if ( target < 0 ):
target = 0
for i in range(self.verts):
tmp = 0
for j in range(self.verts):
tmp += self.data[i][j]
if (switch == "max"):
if (tmp > target):
target = tmp
elif(switch == "min"):
if ( tmp < target):
target = tmp
else:
print (GraphException("Invalid switch passed to degree."))
return target
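# Hedged demo of the classes above (the tiny hard-coded graph and its expected
# metrics are for illustration only; no input file is required):
if __name__ == "__main__":
    g = Graph(4, 3)  # 4 vertices, 3 edges
    g.connect(0, 1)
    g.connect(1, 2)
    g.connect(2, 3)
    g.output()
    print("density:", g.density())         # 2*3 / (4*3) = 0.5
    print("max degree:", g.degree("max"))  # vertices 1 and 2 have degree 2
    print("min degree:", g.degree("min"))  # vertices 0 and 3 have degree 1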
| {
"content_hash": "cbaa35d3efe0d0136f5a07e93234119f",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 106,
"avg_line_length": 29.818181818181817,
"alnum_prop": 0.45454545454545453,
"repo_name": "jeremy24/494-graph-algos",
"id": "7a6ce82b1d18736ccb5d2f10bd7b784f01c075a5",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/hw1/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "87"
},
{
"name": "C",
"bytes": "114"
},
{
"name": "Haskell",
"bytes": "1722"
},
{
"name": "Python",
"bytes": "139237"
}
],
"symlink_target": ""
} |
import sys
import logging
from io import BytesIO
from casacore.measures import measures
from tkp.utility.coordinates import unix2julian
from tkp.utility.redirect_stream import redirect_stream
logger = logging.getLogger(__name__)
targets = { 'CasA': {'ra' : 6.123487680622104, 'dec' : 1.0265153995604648},
'CygA': {'ra' : 5.233686575770755, 'dec' : 0.7109409582180791},
'TauA': {'ra' : 1.4596748493730913, 'dec' : 0.38422502335921294},
'HerA': {'ra' : 4.4119087330382163, 'dec' : 0.087135562905816893},
'VirA': {'ra' : 3.276086511413598, 'dec' : 0.21626589533567378},
'SUN': None,
'JUPITER': None,
}
def check_for_valid_ephemeris(measures):
"""
Checks whether the ephemeris data in use by ``measures`` is valid.
``measures`` should already have a valid reference frame.
"""
# Note that we need to catch and parse the standard error produced by
# casacore: there doesn't seem to be any other way of figuring this out.
casacore_stderr = BytesIO()
with redirect_stream(sys.__stderr__, casacore_stderr):
# We assume the ephemeris is valid if it has position of the sun.
measures.separation(
measures.direction("SUN"), measures.direction("SUN")
)
if "WARN" in casacore_stderr.getvalue():
# casacore sends a warning to stderr if the ephemeris is invalid
return False
else:
return True
def is_bright_source_near(accessor, distance=20):
"""
Checks if there is any of the bright radio sources defined in targets
near the center of the image.
:param accessor: a TKP accessor
:param distance: maximum allowed distance of a bright source (in degrees)
    :returns: False if no bright source is near, description of source if a
bright source is near
"""
#TODO: this function should be split up and tested more atomically
# The measures object is our interface to casacore
m = measures()
# First, you need to set the reference frame -- ie, the time
# -- used for the calculations to come. Time as MJD in seconds.
starttime = int(accessor.taustart_ts.strftime("%s"))
starttime_mjd = unix2julian(starttime)
m.do_frame(m.epoch("UTC", "%ss" % starttime_mjd))
# Now check and ensure the ephemeris in use is actually valid for this
# data.
if not check_for_valid_ephemeris(m):
logger.warn("Bright source check failed due to invalid ephemeris")
return "Invalid ephemeris"
# Second, you need to set your image pointing.
pointing = m.direction(
"J2000", "%sdeg" % accessor.centre_ra, "%sdeg" % accessor.centre_decl
)
for name, position in targets.items():
if not position:
direction = m.direction(name)
else:
direction = m.direction(
"J2000", "%srad" % position['ra'], "%srad" % position['dec']
)
separation = m.separation(pointing, direction).get_value("deg")
if separation < distance:
return "Pointing is %s degrees from %s." % (separation, name)
return False
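# A hedged smoke test (illustration only): the accessor is normally a TKP image
# accessor, but a stand-in exposing just the attributes used above (centre_ra and
# centre_decl in degrees, taustart_ts as a datetime) is enough to exercise the check,
# provided casacore and its ephemeris data are available.
if __name__ == "__main__":
    import datetime
    class _StubAccessor(object):
        centre_ra = 350.85  # illustrative pointing, roughly towards CasA
        centre_decl = 58.8
        taustart_ts = datetime.datetime(2015, 1, 1)
    print(is_bright_source_near(_StubAccessor()))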
| {
"content_hash": "8b310ea2fedd00fcccda1b62606931b1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 38.97530864197531,
"alnum_prop": 0.6430155210643016,
"repo_name": "transientskp/tkp",
"id": "cf46ccda12242a85365a14c9c5bba24a33aad53c",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tkp/quality/brightsource.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "1887"
},
{
"name": "Python",
"bytes": "960959"
},
{
"name": "Shell",
"bytes": "377"
}
],
"symlink_target": ""
} |
"""Tests for ceilometer/publisher/messaging.py
"""
import datetime
import eventlet
import mock
from oslo.config import fixture as fixture_config
import oslo.messaging
import oslo.messaging._drivers.common
from oslo.utils import netutils
import testscenarios.testcase
from ceilometer import messaging
from ceilometer.openstack.common import context
from ceilometer.publisher import messaging as msg_publisher
from ceilometer import sample
from ceilometer.tests import base as tests_base
class BasePublisherTestCase(tests_base.BaseTestCase):
test_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def setUp(self):
super(BasePublisherTestCase, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
self.published = []
class RpcOnlyPublisherTest(BasePublisherTestCase):
def test_published_no_mock(self):
publisher = msg_publisher.RPCPublisher(
netutils.urlsplit('rpc://'))
endpoint = mock.MagicMock(['record_metering_data'])
collector = messaging.get_rpc_server(
self.transport, self.CONF.publisher_rpc.metering_topic, endpoint)
endpoint.record_metering_data.side_effect = (lambda *args, **kwds:
collector.stop())
collector.start()
eventlet.sleep()
publisher.publish_samples(context.RequestContext(),
self.test_data)
collector.wait()
class Matcher(object):
@staticmethod
def __eq__(data):
for i, sample_item in enumerate(data):
if sample_item['counter_name'] != self.test_data[i].name:
return False
return True
endpoint.record_metering_data.assert_called_once_with(
mock.ANY, data=Matcher())
def test_publish_target(self):
publisher = msg_publisher.RPCPublisher(
netutils.urlsplit('rpc://?target=custom_procedure_call'))
cast_context = mock.MagicMock()
with mock.patch.object(publisher.rpc_client, 'prepare') as prepare:
prepare.return_value = cast_context
publisher.publish_samples(mock.MagicMock(),
self.test_data)
prepare.assert_called_once_with(
topic=self.CONF.publisher_rpc.metering_topic)
cast_context.cast.assert_called_once_with(
mock.ANY, 'custom_procedure_call', data=mock.ANY)
def test_published_with_per_meter_topic(self):
publisher = msg_publisher.RPCPublisher(
netutils.urlsplit('rpc://?per_meter_topic=1'))
with mock.patch.object(publisher.rpc_client, 'prepare') as prepare:
publisher.publish_samples(mock.MagicMock(),
self.test_data)
class MeterGroupMatcher(object):
def __eq__(self, meters):
return len(set(meter['counter_name']
for meter in meters)) == 1
topic = self.CONF.publisher_rpc.metering_topic
expected = [mock.call(topic=topic),
mock.call().cast(mock.ANY, 'record_metering_data',
data=mock.ANY),
mock.call(topic=topic + '.test'),
mock.call().cast(mock.ANY, 'record_metering_data',
data=MeterGroupMatcher()),
mock.call(topic=topic + '.test2'),
mock.call().cast(mock.ANY, 'record_metering_data',
data=MeterGroupMatcher()),
mock.call(topic=topic + '.test3'),
mock.call().cast(mock.ANY, 'record_metering_data',
data=MeterGroupMatcher())]
self.assertEqual(expected, prepare.mock_calls)
class TestPublisher(testscenarios.testcase.WithScenarios,
BasePublisherTestCase):
scenarios = [
('notifier', dict(protocol="notifier",
publisher_cls=msg_publisher.NotifierPublisher)),
('rpc', dict(protocol="rpc",
publisher_cls=msg_publisher.RPCPublisher)),
]
def test_published_concurrency(self):
"""Test concurrent access to the local queue of the rpc publisher."""
publisher = self.publisher_cls(
netutils.urlsplit('%s://' % self.protocol))
with mock.patch.object(publisher, '_send') as fake_send:
def fake_send_wait(ctxt, topic, meters):
fake_send.side_effect = mock.Mock()
# Sleep to simulate concurrency and allow other threads to work
eventlet.sleep(0)
fake_send.side_effect = fake_send_wait
job1 = eventlet.spawn(publisher.publish_samples,
mock.MagicMock(), self.test_data)
job2 = eventlet.spawn(publisher.publish_samples,
mock.MagicMock(), self.test_data)
job1.wait()
job2.wait()
self.assertEqual('default', publisher.policy)
self.assertEqual(2, len(fake_send.mock_calls))
self.assertEqual(0, len(publisher.local_queue))
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_no_policy(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
oslo.messaging._drivers.common.RPCException,
publisher.publish_samples,
mock.MagicMock(), self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
mock.ANY, self.CONF.publisher_rpc.metering_topic,
mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_block(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=default' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
oslo.messaging._drivers.common.RPCException,
publisher.publish_samples,
mock.MagicMock(), self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
mock.ANY, self.CONF.publisher_rpc.metering_topic,
mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_incorrect(self, mylog):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=notexist' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
oslo.messaging._drivers.common.RPCException,
publisher.publish_samples,
mock.MagicMock(), self.test_data)
self.assertTrue(mylog.warn.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
mock.ANY, self.CONF.publisher_rpc.metering_topic,
mock.ANY)
def test_published_with_policy_drop_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=drop' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
mock.ANY, self.CONF.publisher_rpc.metering_topic,
mock.ANY)
def test_published_with_policy_queue_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.assert_called_once_with(
mock.ANY, self.CONF.publisher_rpc.metering_topic,
mock.ANY)
def test_published_with_policy_queue_and_rpc_down_up(self):
self.rpc_unreachable = True
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.side_effect = mock.MagicMock()
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(0, len(publisher.local_queue))
topic = self.CONF.publisher_rpc.metering_topic
expected = [mock.call(mock.ANY, topic, mock.ANY),
mock.call(mock.ANY, topic, mock.ANY),
mock.call(mock.ANY, topic, mock.ANY)]
self.assertEqual(expected, fake_send.mock_calls)
def test_published_with_policy_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(netutils.urlsplit(
'%s://?policy=queue&max_queue_length=3' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 5):
for s in self.test_data:
s.source = 'test-%d' % i
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(3, len(publisher.local_queue))
self.assertEqual(
'test-2',
publisher.local_queue[0][2][0]['source']
)
self.assertEqual(
'test-3',
publisher.local_queue[1][2][0]['source']
)
self.assertEqual(
'test-4',
publisher.local_queue[2][2][0]['source']
)
def test_published_with_policy_default_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = oslo.messaging._drivers.common.RPCException()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 2000):
for s in self.test_data:
s.source = 'test-%d' % i
publisher.publish_samples(mock.MagicMock(),
self.test_data)
self.assertEqual(1024, len(publisher.local_queue))
self.assertEqual(
'test-976',
publisher.local_queue[0][2][0]['source']
)
self.assertEqual(
'test-1999',
publisher.local_queue[1023][2][0]['source']
)
| {
"content_hash": "bacf1c1f287feaa2125a631633d38b90",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 79,
"avg_line_length": 40.78260869565217,
"alnum_prop": 0.562544420753376,
"repo_name": "luogangyi/Ceilometer-oVirt",
"id": "7eecca6054783aaf8176a6fed68dc4eb699aa662",
"size": "14772",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable/juno",
"path": "build/lib/ceilometer/tests/publisher/test_messaging_publisher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5438675"
},
{
"name": "Shell",
"bytes": "1304"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="sunburst.marker.colorbar.tickformatstop",
**kwargs,
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
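# Hedged usage sketch (not part of the generated module): validators like this one
# are normally driven by plotly's figure-construction machinery, but they can be
# exercised directly; validate_coerce() returns the value when it is acceptable and
# raises otherwise.
#
#   v = ValueValidator()
#   v.validate_coerce("%d-%b")  # a d3-style format string passes through unchanged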
| {
"content_hash": "7a2e4eb7c15d74523d3ac29bb6c38144",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 29.1875,
"alnum_prop": 0.588865096359743,
"repo_name": "plotly/plotly.py",
"id": "44239fd2d5992347f51d6c71b0fe7726de6409b8",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/marker/colorbar/tickformatstop/_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path, environ
import sys
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'docs/description.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'VERSION'), mode='r', encoding='utf-8') as version_file:
version = version_file.read().strip()
setup(
name='pyeapi',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Python Client for eAPI',
long_description=long_description,
# The project's main homepage.
url='https://github.com/arista-eosplus/pyeapi',
# Author details
author='Arista EOS+ CS',
author_email='[email protected]',
# Choose your license
license='BSD-3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='networking arista eos eapi',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['netaddr'],
# List additional dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest', 'pep8', 'pyflakes', 'twine'],
'test': ['coverage', 'mock'],
},
)
def install():
if "install" in sys.argv:
return True
else:
return False
# Use the following to dynamically build pyeapi module documentation
if install() and environ.get('READTHEDOCS'):
print('This method is only called by READTHEDOCS.')
from subprocess import Popen
proc = Popen(['make', 'modules'], cwd='docs/')
(_, err) = proc.communicate()
return_code = proc.wait()
if return_code or err:
        raise Exception('Failed to make modules. (%s:%s)' % (return_code, err))
| {
"content_hash": "4969effa1132ba9ed318836f19e01bb8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 82,
"avg_line_length": 32.56043956043956,
"alnum_prop": 0.6483293958825515,
"repo_name": "mzbenami/pyeapi",
"id": "3048963bbc28bdf87a1228a9571e1e4761b3f0f5",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "599128"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from RPi import __author__, __version__, __license__
setup(
name='RPi',
version=__version__,
description='RPi.GPIO emulator',
license=__license__,
author=__author__,
author_email='[email protected]',
url='http://nosix.github.io/raspberry-gpio-emulator/',
keywords='raspberry pi gpio emulator python3',
packages=find_packages(),
install_requires=[],
)
| {
"content_hash": "3755fff9bb3692c438772bed59c886c4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 27.25,
"alnum_prop": 0.658256880733945,
"repo_name": "nosix/raspberry-gpio-emulator",
"id": "c6e82d2ce04667ed0574c267f02f86fddf71b084",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "736"
},
{
"name": "Python",
"bytes": "26208"
}
],
"symlink_target": ""
} |
"""
vcoptparse is short for Value-Centric Option Parser. It's a tiny argument parsing library. It has
less features than optparse or argparse, but it kicks more ass.
optparse and argparse allow the client to specify the flags that should be parsed, and as an
afterthought specify what keys should appear in the options dictionary when the parse is over.
vcoptparse works the other way around: you specify the keys and how to determine the keys from the
command line. That's why it's called "value-centric".
Here is a simple example:
>>> op = OptParser()
>>> op["verbose"] = BoolFlag("--verbose")
>>> op["count"] = IntFlag("--count", 5) # Default count is 5
>>> op["infiles"] = ManyPositionalArgs()
>>> op.parse(["foo.py", "--count", "5", "file1.txt", "file2.txt"])
{'count': 5, 'verbose': False, 'infiles': ['file1.txt', 'file2.txt']}
"""
class NoValueClass(object):
pass
NoValue = NoValueClass()
class Arg(object):
pass
class OptError(StandardError):
pass
class OptParser(object):
def __init__(self):
self.parsers_by_key = {}
self.parsers_in_order = []
def __setitem__(self, key, parser):
assert isinstance(parser, Arg)
if key in self.parsers_by_key: del self[key]
assert parser not in self.parsers_by_key.values()
self.parsers_by_key[key] = parser
self.parsers_in_order.append((key, parser))
def __getitem__(self, key):
return self.parsers_by_key[key]
def __delitem__(self, key):
self.parsers_in_order.remove((key, self.parsers_by_key[key]))
del self.parsers_by_key[key]
def parse(self, args):
args = args[1:] # Cut off name of program
values = dict((key, NoValue) for key in self.parsers_by_key.keys())
def name_for_key(key):
return getattr(self.parsers_by_key[key], "name", key)
def set_value(key, new_value):
combiner = getattr(self.parsers_by_key[key], "combiner", enforce_one_combiner)
try:
values[key] = combiner(values[key], new_value)
except OptError, e:
raise OptError(str(e) % {"name": name_for_key(key)})
# Build flag table
flags = {}
for (key, parser) in self.parsers_in_order:
if hasattr(parser, "flags"):
for flag in parser.flags:
assert flag.startswith("-")
if flag in flags:
raise ValueError("The flag %r has two different meanings." % flag)
flags[flag] = (key, parser)
# Handle flag arguments and store positional arguments
positionals = []
while args:
arg = args.pop(0)
if arg.startswith("-"):
if arg in flags:
key, parser = flags[arg]
set_value(key, parser.flag(arg, args))
else:
raise OptError("Don't know how to handle flag %r" % arg)
else:
positionals.append(arg)
# Handle positional arguments
for (key, parser) in self.parsers_in_order:
if hasattr(parser, "positional"):
set_value(key, parser.positional(positionals))
if positionals:
raise OptError("Unexpected extra positional argument(s): %s" % ", ".join(repr(x) for x in positionals))
# Apply defaults
for (key, parser) in self.parsers_by_key.iteritems():
if values[key] is NoValue:
if hasattr(parser, "default") and parser.default is not NoValue:
values[key] = parser.default
else:
raise OptError("You need to specify a value for %r" % name_for_key(key))
return values
# Combiners (indicate how to combine repeat specifications of the same flag)
def most_recent_combiner(old, new):
return new
def enforce_one_combiner(old, new):
if old is not NoValue:
raise OptError("%(name)r should only be specified once.")
return new
def append_combiner(old, new):
if old is NoValue: old = []
return old + [new]
# Converters (indicate how to convert from string arguments to values)
def bool_converter(x):
if x.lower() in ["yes", "true", "y", "t"]: return True
elif x.lower() in ["no", "false", "n", "f"]: return False
else: raise OptError("Expected a yes/no value. Got %r." % x)
def int_converter(x):
try: return int(x)
except ValueError: raise OptError("Expected an integer. Got %r." % x)
def float_converter(x):
try: return float(x)
except ValueError: raise OptError("Expected a float. Got %r." % x)
def choice_converter(choices):
def check(x):
if x in choices: return x
else: raise OptError("Expected one of %s. Got %r." % (", ".join(choices), x))
return check
# Standard argument parsers for common situations
class BoolFlag(Arg):
def __init__(self, arg, invert=False):
assert isinstance(invert, bool)
self.flags = [arg]
self.default = invert
def flag(self, flag, args):
return not self.default
class ChoiceFlags(Arg):
def __init__(self, choices, default = NoValue):
assert all(isinstance(x, str) for x in choices)
self.flags = choices
self.default = default
def flag(self, flag, args):
return flag.lstrip("-")
class ValueFlag(Arg):
def __init__(self, name, converter = str, default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert callable(converter)
assert callable(combiner)
self.flags = [name]
self.converter = converter
self.combiner = combiner
self.default = default
def flag(self, flag, args):
args_gotten = 0
try: value = args.pop(0)
except IndexError:
            raise OptError("Flag %r expects 1 argument, but only got %d." % (flag, args_gotten))
try: value2 = self.converter(value)
except OptError, e:
raise OptError("Problem in argument %d to flag %r: %s" % (args_gotten + 1, flag, e))
args_gotten += 1
return value2
class StringFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, str, default = default)
class IntFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, int_converter, default = default)
class FloatFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, float_converter, default = default)
class ChoiceFlag(ValueFlag):
def __init__(self, name, choices, default = NoValue):
ValueFlag.__init__(self, name, choice_converter(choices), default = default)
class MultiValueFlag(Arg):
def __init__(self, name, converters = [str], default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert all(callable(x) for x in converters)
assert callable(combiner)
self.flags = [name]
self.converters = converters
self.combiner = combiner
self.default = default
def flag(self, flag, args):
new_values = ()
args_gotten = 0
for converter in self.converters:
try: value = args.pop(0)
except IndexError:
raise OptError("Flag %r expects %d argument(s), but only got %d." % (flag, len(self.converters), args_gotten))
try: value2 = converter(value)
except OptError, e:
raise OptError("Problem in argument %d to flag %r: %s" % (args_gotten + 1, flag, e))
new_values += (value2, )
args_gotten += 1
return new_values
class AllArgsAfterFlag(Arg):
def __init__(self, name, converter = str, default = NoValue):
assert isinstance(name, str)
assert callable(converter)
self.flags = [name]
self.converter = converter
self.default = default
def flag(self, flag, args):
args2 = []
for arg in args:
try: args2.append(self.converter(arg))
except OptError, e: raise OptError("For %r: %s" % (flag, e))
del args[:] # We consume all arguments remaining
return args2
class PositionalArg(Arg):
def __init__(self, name = None, converter = str, default = NoValue):
assert callable(converter)
self.name = name
self.converter = converter
self.default = default
def positional(self, args):
try: value = args.pop(0)
except IndexError:
if self.default is NoValue:
if self.name is None:
raise OptError("Too few positional arguments.")
else:
raise OptError("Too few positional arguments; need a value for %r." % self.name)
else:
return NoValue
try: value2 = self.converter(value)
except OptError, e:
if self.name is None: raise
else: raise OptError("For %r: %s" % (self.name, e))
return value2
class ManyPositionalArgs(Arg):
def __init__(self, name = None, converter = str):
assert callable(converter)
self.name = name
self.converter = converter
def positional(self, args):
args2 = []
for arg in args:
try: args2.append(self.converter(arg))
except OptError, e:
if self.name is None: raise
else: raise OptError("For %r: %s" % (self.name, e))
del args[:] # We consume all arguments remaining
return args2
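# --- Illustrative usage (not part of the original module) ---
# A minimal, hedged sketch showing the combiner and choice-flag machinery in
# addition to the flags demonstrated in the module docstring; the flag names
# and file name below are made up for the example.
if __name__ == "__main__":
    demo = OptParser()
    demo["mode"] = ChoiceFlag("--mode", ["fast", "safe"], default="safe")
    demo["define"] = ValueFlag("--define", combiner=append_combiner, default=[])
    demo["inputs"] = ManyPositionalArgs()
    # "--define" may be repeated thanks to append_combiner; "--mode" may not.
    print demo.parse(["prog", "--mode", "fast", "--define", "a=1", "--define", "b=2", "in.txt"])
    # Roughly: {'mode': 'fast', 'define': ['a=1', 'b=2'], 'inputs': ['in.txt']}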
| {
"content_hash": "9c990fc805e2643e3fefc0a62b70ca6a",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 126,
"avg_line_length": 34.02768166089965,
"alnum_prop": 0.5840959934919666,
"repo_name": "robertjpayne/rethinkdb",
"id": "c1b7eb5f18bf50f7e157cea4e8f9177ef3abbbfa",
"size": "9888",
"binary": false,
"copies": "48",
"ref": "refs/heads/next",
"path": "scripts/VirtuaBuild/vcoptparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "2597"
},
{
"name": "C",
"bytes": "80175"
},
{
"name": "C++",
"bytes": "8609176"
},
{
"name": "CSS",
"bytes": "403688"
},
{
"name": "CoffeeScript",
"bytes": "539374"
},
{
"name": "HTML",
"bytes": "75496"
},
{
"name": "Haskell",
"bytes": "13234"
},
{
"name": "Java",
"bytes": "1889375"
},
{
"name": "JavaScript",
"bytes": "672427"
},
{
"name": "Makefile",
"bytes": "67067"
},
{
"name": "Nginx",
"bytes": "728"
},
{
"name": "Perl",
"bytes": "6368"
},
{
"name": "Protocol Buffer",
"bytes": "42521"
},
{
"name": "Python",
"bytes": "4453630"
},
{
"name": "Roff",
"bytes": "572"
},
{
"name": "Ruby",
"bytes": "144432"
},
{
"name": "Shell",
"bytes": "61859"
},
{
"name": "XSLT",
"bytes": "11895"
}
],
"symlink_target": ""
} |
import datetime
import decimal
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils.translation import ugettext_lazy as _
import random
from ..pricing import Price
from ..product.models import Variant
from ..util import countries
from . import signals
from .exceptions import EmptyCart
class OrderManager(models.Manager):
def get_from_cart(self, cart, instance=None):
'''
Create an order from the user's cart, possibly discarding any previous
orders created for this cart.
'''
from .handler import partitioner_queue
if cart.is_empty():
raise EmptyCart("Cannot create empty order.")
previous_orders = self.filter(cart=cart)
if not instance:
order = self.model.objects.create(cart=cart, user=cart.owner,
currency=cart.currency)
else:
order = instance
order.groups.all().delete()
try:
order.paymentvariant.delete()
except ObjectDoesNotExist:
pass
groups = partitioner_queue.partition(cart)
for group in groups:
delivery_group = order.create_delivery_group()
for item in group:
ordered_item = order.create_ordered_item(delivery_group, item)
ordered_item.save()
previous_orders = (previous_orders.exclude(pk=order.pk)
.filter(status='checkout'))
previous_orders.delete()
return order
class Order(models.Model):
STATUS_CHOICES = (
('checkout', _('undergoing checkout')),
('payment-pending', _('waiting for payment')),
('payment-complete', _('paid')),
('payment-failed', _('payment failed')),
('delivery', _('shipped')),
('cancelled', _('cancelled')),
)
# Do not set the status manually, use .set_status() instead.
status = models.CharField(_('order status'), max_length=32,
choices=STATUS_CHOICES, default='checkout')
created = models.DateTimeField(default=datetime.datetime.now,
editable=False, blank=True)
last_status_change = models.DateTimeField(default=datetime.datetime.now,
editable=False, blank=True)
user = models.ForeignKey(User, blank=True, null=True, related_name='orders')
currency = models.CharField(max_length=3)
billing_first_name = models.CharField(_("first name"),
max_length=256, blank=True)
billing_last_name = models.CharField(_("last name"),
max_length=256, blank=True)
billing_company_name = models.CharField(_("company name"),
max_length=256, blank=True)
billing_street_address_1 = models.CharField(_("street address 1"),
max_length=256, blank=True)
billing_street_address_2 = models.CharField(_("street address 2"),
max_length=256, blank=True)
billing_city = models.CharField(_("city"), max_length=256, blank=True)
billing_postal_code = models.CharField(_("postal code"),
max_length=20, blank=True)
billing_country = models.CharField(_("country"),
choices=countries.COUNTRY_CHOICES,
max_length=2, blank=True)
billing_country_area = models.CharField(_("country administrative area"),
max_length=128, blank=True)
billing_tax_id = models.CharField(_("tax ID"), max_length=40, blank=True)
billing_phone = models.CharField(_("phone number"),
max_length=30, blank=True)
payment_type = models.CharField(max_length=256, blank=True)
token = models.CharField(max_length=32, blank=True, default='')
class Meta:
        # Use a descriptive string to resolve the ambiguity of the word 'order' in English.
verbose_name = _('order (business)')
verbose_name_plural = _('orders (business)')
ordering = ('-last_status_change',)
def __unicode__(self):
return _('Order #%d') % self.id
def save(self, *args, **kwargs):
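        # Lazily assign a unique random 32-character token on the first save,
        # trying up to 100 candidates before giving up silently.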
if not self.token:
for i in xrange(100):
token = ''.join(random.sample(
'0123456789abcdefghijklmnopqrstuvwxyz', 32))
if not Order.objects.filter(token=token).count():
self.token = token
break
return super(Order, self).save(*args, **kwargs)
@property
def billing_full_name(self):
return u'%s %s' % (self.billing_first_name, self.billing_last_name)
def set_status(self, new_status):
old_status = self.status
self.status = new_status
self.last_status_change = datetime.datetime.now()
self.save()
signals.order_status_changed.send(sender=type(self), instance=self,
old_status=old_status)
def subtotal(self):
return sum([g.subtotal() for g in self.groups.all()],
Price(0, currency=self.currency))
def delivery_price(self):
return sum([g.delivery_price() for g in self.groups.all()],
Price(0, currency=self.currency))
def payment_price(self):
try:
return Price(self.paymentvariant.price,
currency=self.currency)
except ObjectDoesNotExist:
return Price(0, currency=self.currency)
def total(self):
payment_price = self.payment_price()
return payment_price + sum([g.total() for g in self.groups.all()],
Price(0, currency=self.currency))
def create_delivery_group(self):
return self.groups.create(order=self)
def create_ordered_item(self, delivery_group, item):
price = item.get_unit_price()
variant = item.variant.get_subtype_instance()
name = unicode(variant)
ordered_item_class = self.get_ordered_item_class()
ordered_item = ordered_item_class(delivery_group=delivery_group,
product_variant=item.variant,
product_name=name,
quantity=item.quantity,
unit_price_net=price.net,
unit_price_gross=price.gross)
return ordered_item
def get_ordered_item_class(self):
return OrderedItem
class DeliveryGroup(models.Model):
order = models.ForeignKey(Order, related_name='groups')
delivery_type = models.CharField(max_length=256, blank=True)
def subtotal(self):
return sum([i.price() for i in self.items.all()],
Price(0, currency=self.order.currency))
def delivery_price(self):
try:
return Price(self.deliveryvariant.price,
currency=self.order.currency)
except ObjectDoesNotExist:
return Price(0, currency=self.order.currency)
def total(self):
delivery_price = self.delivery_price()
return delivery_price + sum([i.price() for i in self.items.all()],
Price(0, currency=self.order.currency))
class OrderedItem(models.Model):
delivery_group = models.ForeignKey(DeliveryGroup, related_name='items')
product_variant = models.ForeignKey(Variant, blank=True, null=True,
related_name='+',
on_delete=models.SET_NULL)
product_name = models.CharField(max_length=128)
quantity = models.DecimalField(_('quantity'),
max_digits=10, decimal_places=4)
unit_price_net = models.DecimalField(_('unit price (net)'),
max_digits=12, decimal_places=4)
unit_price_gross = models.DecimalField(_('unit price (gross)'),
max_digits=12, decimal_places=4)
def unit_price(self):
return Price(net=self.unit_price_net, gross=self.unit_price_gross,
currency=self.delivery_group.order.currency)
def price(self):
net = self.unit_price_net * self.quantity
gross = self.unit_price_gross * self.quantity
return Price(net=net.quantize(decimal.Decimal('0.01')),
gross=gross.quantize(decimal.Decimal('0.01')),
currency=self.delivery_group.order.currency)
| {
"content_hash": "586a8d9eb21d5876b24b6f79090f9003",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 83,
"avg_line_length": 43.83743842364532,
"alnum_prop": 0.5624227441285538,
"repo_name": "fusionbox/satchless",
"id": "a1856ff7a0c3c967fb6a406256cef452334d0162",
"size": "8899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satchless/order/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47040"
},
{
"name": "HTML",
"bytes": "87824"
},
{
"name": "JavaScript",
"bytes": "23123"
},
{
"name": "Python",
"bytes": "376774"
}
],
"symlink_target": ""
} |
import string
import types
import cStringIO
## json.py implements a JSON (http://json.org) reader and writer.
## Copyright (C) 2005-2007 Patrick D. Logan, Ville H. Tuulos
## Contact mailto:[email protected]
##
## Slightly modified by Ville H. Tuulos for PyS60
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class _StringGenerator(object):
def __init__(self, string):
self.string = string
self.index = -1
def peek(self):
i = self.index + 1
if i < len(self.string):
return self.string[i]
else:
return None
def next(self):
self.index += 1
if self.index < len(self.string):
return self.string[self.index]
else:
raise StopIteration
def putback(self, ch):
self.index -= 1
def all(self):
return self.string
class _FileGenerator:
def __init__(self, fd):
self.fd = fd
self.peeked = ""
self.buf = ""
def peek(self):
if not self.peeked:
self.peeked = self.fd.read(1)
return self.peeked
def next(self):
if self.peeked:
r = self.peeked
self.peeked = ""
else:
r = self.fd.read(1)
if r:
#self.buf += r
return r
else:
raise StopIteration
def putback(self, ch):
self.peeked = ch
def all(self):
return self.buf
class WriteException(Exception):
pass
class ReadException(Exception):
pass
class JsonReader(object):
hex_digits = {'A': 10,'B': 11,'C': 12,'D': 13,'E': 14,'F':15}
escapes = {'t':'\t','n':'\n','f':'\f','r':'\r','b':'\b'}
def read_stream(self, fd):
self._generator = _FileGenerator(fd)
return self._read()
def read(self, s):
self._generator = _StringGenerator(s)
result = self._read()
return result
def _read(self):
self._eatWhitespace()
peek = self._peek()
if peek is None:
raise ReadException, "Nothing to read: '%s'" % self._generator.all()
if peek == '{':
return self._readObject()
elif peek == '[':
return self._readArray()
elif peek == '"':
return self._readString()
elif peek == '-' or peek.isdigit():
return self._readNumber()
elif peek == 't':
return self._readTrue()
elif peek == 'f':
return self._readFalse()
elif peek == 'n':
return self._readNull()
elif peek == '/':
self._readComment()
return self._read()
else:
raise ReadException, "Input is not valid JSON: '%s'" % self._generator.all()
def _readTrue(self):
self._assertNext('t', "true")
self._assertNext('r', "true")
self._assertNext('u', "true")
self._assertNext('e', "true")
return True
def _readFalse(self):
self._assertNext('f', "false")
self._assertNext('a', "false")
self._assertNext('l', "false")
self._assertNext('s', "false")
self._assertNext('e', "false")
return False
def _readNull(self):
self._assertNext('n', "null")
self._assertNext('u', "null")
self._assertNext('l', "null")
self._assertNext('l', "null")
return None
def _assertNext(self, ch, target):
if self._next() != ch:
raise ReadException, "Trying to read %s: '%s'" % (target, self._generator.all())
def _readNumber(self, allow_exp = True):
isfloat = False
result = self._next()
exponent = 0
peek = self._peek()
while peek is not None and (peek.isdigit() or peek == "." or (allow_exp and peek == "e")):
isfloat = isfloat or peek == "." or peek == "e"
if peek == "e":
self._next() # eat 'e'
exponent = self._readNumber(False)
break
result = result + self._next()
peek = self._peek()
try:
if isfloat:
return float(result) * 10 ** exponent
else:
return int(result)
except ValueError:
raise ReadException, "Not a valid JSON number: '%s'" % result
def _readString(self):
result = cStringIO.StringIO()
next = self._generator.next
assert next() == '"'
try:
while True:
ch = next()
if ch == '"':
self._generator.putback(ch)
break
if ch == "\\":
ch = next()
if ch in 'brnft':
ch = self.escapes[ch]
elif ch == "u":
ch4096 = next()
ch256 = next()
ch16 = next()
ch1 = next()
n = 4096 * self._hexDigitToInt(ch4096)
n += 256 * self._hexDigitToInt(ch256)
n += 16 * self._hexDigitToInt(ch16)
n += self._hexDigitToInt(ch1)
ch = unichr(n)
elif ch not in '"/\\':
raise ReadException, "Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all())
result.write(ch)
except StopIteration:
raise ReadException, "Not a valid JSON string: '%s'" % self._generator.all()
assert self._next() == '"'
return result.getvalue()
def _hexDigitToInt(self, ch):
try:
result = self.hex_digits[ch.upper()]
except KeyError:
try:
result = int(ch)
except ValueError:
raise ReadException, "The character %s is not a hex digit." % ch
return result
def _readComment(self):
assert self._next() == "/"
second = self._next()
if second == "/":
self._readDoubleSolidusComment()
elif second == '*':
self._readCStyleComment()
else:
raise ReadException, "Not a valid JSON comment: %s" % self._generator.all()
def _readCStyleComment(self):
try:
done = False
while not done:
ch = self._next()
done = (ch == "*" and self._peek() == "/")
if not done and ch == "/" and self._peek() == "*":
raise ReadException, "Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all()
self._next()
except StopIteration:
raise ReadException, "Not a valid JSON comment: %s, expected */" % self._generator.all()
def _readDoubleSolidusComment(self):
try:
ch = self._next()
while ch != "\r" and ch != "\n":
ch = self._next()
except StopIteration:
pass
def _readArray(self):
result = []
assert self._next() == '['
done = self._peek() == ']'
while not done:
item = self._read()
result.append(item)
self._eatWhitespace()
done = self._peek() == ']'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
assert ']' == self._next()
return result
def _readObject(self):
result = {}
assert self._next() == '{'
done = self._peek() == '}'
while not done:
key = self._read()
if type(key) is not types.StringType:
raise ReadException, "Not a valid JSON object key (should be a string): %s" % key
self._eatWhitespace()
ch = self._next()
if ch != ":":
raise ReadException, "Not a valid JSON object: '%s' due to: '%s'" % (self._generator.all(), ch)
self._eatWhitespace()
val = self._read()
result[key] = val
self._eatWhitespace()
done = self._peek() == '}'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
assert self._next() == "}"
return result
def _eatWhitespace(self):
p = self._peek()
while p is not None and p in string.whitespace or p == '/':
if p == '/':
self._readComment()
else:
self._next()
p = self._peek()
def _peek(self):
return self._generator.peek()
def _next(self):
return self._generator.next()
class JsonWriter(object):
def _append(self, s):
self._results.write(s)
def write(self, obj, escaped_forward_slash=False):
self._escaped_forward_slash = escaped_forward_slash
self._results = cStringIO.StringIO()
self._write(obj)
return self._results.getvalue()
def _write(self, obj):
ty = type(obj)
if ty is types.DictType:
n = len(obj)
self._append("{")
for k, v in obj.items():
self._write(k)
self._append(":")
self._write(v)
n = n - 1
if n > 0:
self._append(",")
self._append("}")
elif ty is types.ListType or ty is types.TupleType:
n = len(obj)
self._append("[")
for item in obj:
self._write(item)
n = n - 1
if n > 0:
self._append(",")
self._append("]")
elif ty is types.StringType or ty is types.UnicodeType:
self._append('"')
obj = obj.replace('\\', r'\\')
if self._escaped_forward_slash:
obj = obj.replace('/', r'\/')
obj = obj.replace('"', r'\"')
obj = obj.replace('\b', r'\b')
obj = obj.replace('\f', r'\f')
obj = obj.replace('\n', r'\n')
obj = obj.replace('\r', r'\r')
obj = obj.replace('\t', r'\t')
self._append(obj)
self._append('"')
elif ty is types.IntType or ty is types.LongType:
self._append(str(obj))
elif ty is types.FloatType:
self._append("%f" % obj)
elif obj is True:
self._append("true")
elif obj is False:
self._append("false")
elif obj is None:
self._append("null")
else:
raise WriteException, "Cannot write in JSON: %s" % repr(obj)
def write(obj, escaped_forward_slash=False):
return JsonWriter().write(obj, escaped_forward_slash)
def read(s):
return JsonReader().read(s)
def read_stream(fd):
return JsonReader().read_stream(fd)
# Maintain interface compatibility with Python 2.6's builtin json library
dumps = write
loads = read
load = read_stream
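# A minimal, hedged round-trip sketch (not part of the original module); it
# only assumes the file is executed directly under Python 2.
if __name__ == "__main__":
    encoded = dumps({"name": "PyS60", "ids": [1, 2, 3], "ok": True})
    print encoded
    print loads(encoded)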
| {
"content_hash": "377fb25e61156938006966c735285f52",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 136,
"avg_line_length": 33.98066298342541,
"alnum_prop": 0.4806926266157223,
"repo_name": "yonggang985/Sniper",
"id": "b0977d717f0cac0b5eda5f30f47eb4b7478c70dc",
"size": "12301",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/localjson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "90653"
},
{
"name": "C++",
"bytes": "1722452"
},
{
"name": "Makefile",
"bytes": "21654"
},
{
"name": "Objective-C",
"bytes": "645"
},
{
"name": "Python",
"bytes": "103923"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemgroup_systemcmdpolicy_binding(base_resource) :
""" Binding class showing the systemcmdpolicy that can be bound to systemgroup.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._groupname = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority of the command policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority of the command policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""The name of command policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""The name of command policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def groupname(self) :
ur"""Name of the system group.<br/>Minimum length = 1.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
ur"""Name of the system group.<br/>Minimum length = 1
"""
try :
self._groupname = groupname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemgroup_systemcmdpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemgroup_systemcmdpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.groupname is not None :
return str(self.groupname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = systemgroup_systemcmdpolicy_binding()
updateresource.groupname = resource.groupname
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [systemgroup_systemcmdpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].groupname = resource[i].groupname
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = systemgroup_systemcmdpolicy_binding()
deleteresource.groupname = resource.groupname
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [systemgroup_systemcmdpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].groupname = resource[i].groupname
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, groupname) :
ur""" Use this API to fetch systemgroup_systemcmdpolicy_binding resources.
"""
try :
obj = systemgroup_systemcmdpolicy_binding()
obj.groupname = groupname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, groupname, filter_) :
ur""" Use this API to fetch filtered set of systemgroup_systemcmdpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemgroup_systemcmdpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, groupname) :
		ur""" Use this API to count systemgroup_systemcmdpolicy_binding resources configured on NetScaler.
"""
try :
obj = systemgroup_systemcmdpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, groupname, filter_) :
ur""" Use this API to count the filtered set of systemgroup_systemcmdpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemgroup_systemcmdpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class systemgroup_systemcmdpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.systemgroup_systemcmdpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemgroup_systemcmdpolicy_binding = [systemgroup_systemcmdpolicy_binding() for _ in range(length)]
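# Illustrative usage (not part of the generated binding), assuming an already
# authenticated nitro_service session object named "client"; the group and
# policy names below are placeholders:
#
#     binding = systemgroup_systemcmdpolicy_binding()
#     binding.groupname = "ns-admins"
#     binding.policyname = "superuser"
#     binding.priority = 100
#     systemgroup_systemcmdpolicy_binding.add(client, binding)
#     bound = systemgroup_systemcmdpolicy_binding.get(client, "ns-admins")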
| {
"content_hash": "cd10ca6726263e2cd24f3538e1e7f2ac",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 137,
"avg_line_length": 30.497584541062803,
"alnum_prop": 0.7110723903057183,
"repo_name": "atopuzov/nitro-python",
"id": "29d6026ec9e5b5f7a87f856eedda40b89e2a56ae",
"size": "6927",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/system/systemgroup_systemcmdpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
class Shell(object):
"""Represents an abstract Mojo shell."""
def ServeLocalDirectory(self, local_dir_path, port=0,
additional_mappings=None):
"""Serves the content of the local (host) directory, making it available to
the shell under the url returned by the function.
The server will run on a separate thread until the program terminates. The
call returns immediately.
Args:
local_dir_path: path to the directory to be served
port: port at which the server will be available to the shell
additional_mappings: List of tuples (prefix, local_base_path) mapping
URLs that start with |prefix| to local directory at |local_base_path|.
The prefixes should skip the leading slash.
Returns:
The url that the shell can use to access the content of |local_dir_path|.
"""
raise NotImplementedError()
def ForwardHostPortToShell(self, host_port):
"""Forwards a port on the host machine to the same port wherever the shell
is running.
This is a no-op if the shell is running locally.
"""
raise NotImplementedError()
def Run(self, arguments):
"""Runs the shell with given arguments until shell exits, passing the stdout
mingled with stderr produced by the shell onto the stdout.
Returns:
      Exit code returned by the shell or None if the exit code cannot be
retrieved.
"""
raise NotImplementedError()
def RunAndGetOutput(self, arguments):
"""Runs the shell with given arguments until shell exits and returns the
output.
Args:
arguments: list of arguments for the shell
Returns:
A tuple of (return_code, output). |return_code| is the exit code returned
by the shell or None if the exit code cannot be retrieved. |output| is the
stdout mingled with the stderr produced by the shell.
"""
raise NotImplementedError()
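# A minimal, hedged sketch (not part of devtoolslib) of how a local, concrete
# shell could satisfy part of this interface; it assumes the caller supplies the
# path to a shell binary, and it leaves ServeLocalDirectory unimplemented.
class _LocalShellSketch(Shell):
  def __init__(self, shell_binary_path):
    self.shell_binary_path = shell_binary_path

  def ForwardHostPortToShell(self, host_port):
    # The shell runs on this machine, so there is nothing to forward.
    pass

  def Run(self, arguments):
    import subprocess
    return subprocess.call([self.shell_binary_path] + arguments)

  def RunAndGetOutput(self, arguments):
    import subprocess
    process = subprocess.Popen([self.shell_binary_path] + arguments,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    return process.returncode, output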
| {
"content_hash": "f2985fa143c5e65b3c00bff789b17506",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 35.5,
"alnum_prop": 0.6901408450704225,
"repo_name": "chinmaygarde/mojo",
"id": "bb22ea41ca90917ed2a6054f78376a55bc6d6835",
"size": "2081",
"binary": false,
"copies": "4",
"ref": "refs/heads/ios",
"path": "mojo/devtools/common/devtoolslib/shell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1880713"
},
{
"name": "C++",
"bytes": "35838874"
},
{
"name": "Dart",
"bytes": "969667"
},
{
"name": "Go",
"bytes": "186519"
},
{
"name": "Groff",
"bytes": "29030"
},
{
"name": "HTML",
"bytes": "41854"
},
{
"name": "Java",
"bytes": "1274683"
},
{
"name": "JavaScript",
"bytes": "208100"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Objective-C",
"bytes": "75638"
},
{
"name": "Objective-C++",
"bytes": "408801"
},
{
"name": "Protocol Buffer",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "5645880"
},
{
"name": "Shell",
"bytes": "148167"
},
{
"name": "Yacc",
"bytes": "31141"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from django import template
from datetime import date, timedelta
register = template.Library()
@register.filter
def to_class_name(value):
return value.__class__.__name__
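# Illustrative template usage (not part of the original file): after loading this
# tag library in a template, {{ some_object|to_class_name }} renders the class
# name of some_object.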
| {
"content_hash": "de8dbd4cbcb794b8336cf92d18670281",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 36,
"avg_line_length": 21.625,
"alnum_prop": 0.7572254335260116,
"repo_name": "per7inac1ousQ/Directories",
"id": "8f100ca9d235481648efdcdcc1efc29a195f0089",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Directories/Directories_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2490"
},
{
"name": "Python",
"bytes": "111287"
},
{
"name": "TeX",
"bytes": "2266"
}
],
"symlink_target": ""
} |
import time;
from time import strftime;
import urllib2;
import sys;
#prettyfull welcome message
print("***Welcome to LegendCraft HeartbeatSaver***\n");
print("...:: This program is designed to send ::...\n ....:: a Heartbeat to ClassiCube.net! ::.... \n");
print(".::.:.:.:.:.:.:.:.:.:.:.:.:.:.:.:.:.:.:.::.\n");
#variables
rawData = []
finalData = ""
count = 1
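# heartbeatdata.txt is read positionally below: line 1 = salt, line 3 = port,
# line 4 = current player count, line 5 = max players, line 6 = server name,
# line 7 = public flag (line 2 is not used by this script).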
#send the request (called from checkData() )
def sendHeartBeat(rawData, count):
finalData = "http://www.classicube.net/heartbeat.jsp?public=" + rawData[6].replace("\n", "") + "&max=" + rawData[4].replace("\n", "") + "&users=" + rawData[3].replace("\n", "") + "&port=" + rawData[2].replace("\n", "") + "&version=7&salt=" + rawData[0].replace("\n", "") + "&name=" + (rawData[5].replace("\n", ""));
response = urllib2.urlopen(finalData.replace(" ","%20"));#grab the response
responseData = response.read();
print str(strftime("%I:%M")) + " - Sending Heartbeat... Count: " + str(count);
    if(responseData.startswith("http://www.classicube.net/server/play")):#check that the response does not contain errors
print "Heartbeat sent!\n";
try:
with open("externalurl.txt"): pass
except IOError:
print "WARNING: externalurl.txt not found. HeartBeat Saver will now close..."
time.sleep(5);
sys.exit(0);
externalUrlFile = open("externalurl.txt", "w");#open, wipe, and rewrite externalurl.txt
externalUrlFile.write(responseData);
externalUrlFile.close();
else:
print "Heartbeat failed: " + responseData + "\n";
response.close();
#check info from heartbeat, start the sending loop (called from main while loop)
def checkData():
    lineNum = 0
    del rawData[:]  # clear data from the previous cycle so stale values are not reused
try:
file = open("heartbeatdata.txt")
except IOError:
print "WARNING: Heartbeatdata.txt not found. HeartBeat Saver will now close..."
time.sleep(5)
sys.exit(0)
for line in file:
rawData.append(line)
lineNum += 1
if(lineNum != 7):
print "WARNING: Heartbeatdata.txt has been damaged or corrupted."
sys.exit(0)
sendHeartBeat(rawData, count)
#gather data, main loop
while(True):
checkData();
time.sleep(10);
count += 1;
| {
"content_hash": "8ed6b48e0631027b65a1fcd51072b491",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 320,
"avg_line_length": 35.476190476190474,
"alnum_prop": 0.6089485458612975,
"repo_name": "LeChosenOne/LegendCraft",
"id": "19d6b8db77c771c1f38dd08dd84bf7ed1037515d",
"size": "3344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HeartBeat/HeartBeatSaver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "3777559"
},
{
"name": "Java",
"bytes": "5450"
},
{
"name": "Lua",
"bytes": "3539"
},
{
"name": "Python",
"bytes": "3344"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import os
from decimal import Decimal
from unittest import skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils._os import upath
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CommaSeparatedInteger,
CustomErrorMessage, CustomFF, CustomFieldForExclusionModel, DateTimePost,
DerivedBook, DerivedPost, Document, ExplicitPK, FilePathModel,
FlexibleDatePost, Homepage, ImprovedArticle, ImprovedArticleWithParentLink,
Inventory, Person, Photo, Post, Price, Product, Publication,
PublicationDefaults, StrictAssignmentAll, StrictAssignmentFieldSpecific,
Student, StumpJoke, TextFile, Triple, Writer, WriterProfile, test_images,
)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
fields = '__all__'
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
fields = '__all__'
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
class PriceForm(forms.ModelForm):
class Meta:
model = Price
fields = '__all__'
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = '__all__'
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = '__all__'
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
fields = '__all__'
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = '__all__'
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
fields = '__all__'
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
fields = '__all__'
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
class Meta:
fields = '__all__'
model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
with self.assertRaises(ValueError):
NoModelModelForm()
def test_empty_fields_to_fields_for_model(self):
"""
An argument of fields=() to fields_for_model should return an empty dictionary
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
def __init__(self, *args, **kwargs):
super(FormForTestingIsValid, self).__init__(*args, **kwargs)
self.fields['character'].required = False
char = Character.objects.create(username='user',
last_action=datetime.datetime.today())
data = {'study': 'Engineering'}
data2 = {'study': 'Engineering', 'character': char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_blank_false_with_null_true_foreign_key_field(self):
"""
A ModelForm with a model having ForeignKey(blank=False, null=True)
and the form field set to required=False should allow the field to be
unset.
"""
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = '__all__'
def __init__(self, *args, **kwargs):
super(AwardForm, self).__init__(*args, **kwargs)
self.fields['character'].required = False
character = Character.objects.create(username='user', last_action=datetime.datetime.today())
award = Award.objects.create(name='Best sprinter', character=character)
data = {'name': 'Best tester', 'character': ''} # remove character
form = AwardForm(data=data, instance=award)
self.assertTrue(form.is_valid())
award = form.save()
self.assertIsNone(award.character)
def test_save_blank_false_with_required_false(self):
"""
A ModelForm with a model with a field set to blank=False and the form
field set to required=False should allow the field to be unset.
"""
obj = Writer.objects.create(name='test')
form = CustomWriterForm(data={'name': ''}, instance=obj)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.name, '')
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_extra_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'no-field')
except FieldError as e:
# Make sure the exception contains some reference to the
# field responsible for the problem.
self.assertIn('no-field', e.args[0])
else:
self.fail('Invalid "no-field" field not caught')
def test_extra_declared_field_model_form(self):
try:
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'age')
except FieldError:
self.fail('Declarative field raised FieldError incorrectly')
def test_extra_field_modelform_factory(self):
with self.assertRaises(FieldError):
modelform_factory(Person, fields=['no-field', 'name'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ['url']
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['nonexistent']
def test_limit_fields_with_string(self):
expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ('url') # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug'])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['nonexistent']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = ('url') # note the missing comma
def test_exclude_and_validation(self):
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertIsNone(form.instance.quantity)
self.assertIsNone(form.instance.pk)
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = '__all__'
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
# First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
with self.assertRaises(ValueError):
InvalidModelForm()
# Even if you provide a model instance
with self.assertRaises(ValueError):
InvalidModelForm(instance=Category)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
class FieldOverridesByFormMetaForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
labels = {
'name': 'Title',
}
help_texts = {
'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
}
error_messages = {
'slug': {
'invalid': (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
field_classes = {
'url': forms.URLField,
}
class TestFieldOverridesByFormMeta(SimpleTestCase):
def test_widget_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name']),
'<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20"></textarea>',
)
self.assertHTMLEqual(
str(form['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />',
)
self.assertHTMLEqual(
str(form['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />',
)
def test_label_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name'].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form['url'].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form['slug'].label_tag()),
'<label for="id_slug">Slug:</label>',
)
def test_help_text_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertEqual(
form['slug'].help_text,
'Watch out! Letters, numbers, underscores and hyphens only.',
)
def test_error_messages_overrides(self):
form = FieldOverridesByFormMetaForm(data={
'name': 'Category',
'url': 'http://www.example.com/category/',
'slug': '!%#*@',
})
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {'slug': error})
def test_field_type_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(SimpleTestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
class UniqueTest(TestCase):
"""
unique/unique_together validation.
"""
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_multiple_field_unique_together(self):
"""
When the same field is involved in multiple unique_together
constraints, we need to make sure we don't remove the data for it
before doing all the validation checking (not just failing after
the first one).
"""
class TripleForm(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
Triple.objects.create(left=1, middle=2, right=3)
form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
self.assertFalse(form.is_valid())
form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
self.assertTrue(form.is_valid())
@skipUnlessDBFeature('supports_nullable_unique_constraints')
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': ''})
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
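# Re-submitting the same key and blank desc trips three unique checks:
# key, desc, and the (key, desc) pair.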
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_unique_for_date_in_exclude(self):
"""
If the date for unique_for_* constraints is excluded from the
ModelForm (in this case 'posted' has editable=False), the
constraint should be ignored.
"""
class DateTimePostForm(forms.ModelForm):
class Meta:
model = DateTimePost
fields = '__all__'
DateTimePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally",
posted=datetime.datetime(2008, 9, 3, 10, 10, 1))
# 'title' has unique_for_date='posted'
form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
# 'slug' has unique_for_year='posted'
form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertTrue(form.is_valid())
# 'subtitle' has unique_for_month='posted'
form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertTrue(form.is_valid())
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
fields = '__all__'
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
def test_override_unique_message(self):
class CustomProductForm(ProductForm):
class Meta(ProductForm.Meta):
error_messages = {
'slug': {
'unique': "%(model_name)s's %(field_label)s not unique.",
}
}
Product.objects.create(slug='teddy-bear-blue')
form = CustomProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
def test_override_unique_together_message(self):
class CustomPriceForm(PriceForm):
class Meta(PriceForm.Meta):
error_messages = {
NON_FIELD_ERRORS: {
'unique_together': "%(model_name)s's %(field_labels)s not unique.",
}
}
Price.objects.create(price=6.00, quantity=1)
form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
def test_override_unique_for_date_message(self):
class CustomPostForm(PostForm):
class Meta(PostForm.Meta):
error_messages = {
'title': {
'unique_for_date': (
"%(model_name)s's %(field_label)s not unique "
"for %(date_field_label)s date."
),
}
}
Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelToDictTests(TestCase):
"""
Tests for forms.models.model_to_dict
"""
def test_model_to_dict_many_to_many(self):
categories = [
Category(name='TestName1', slug='TestName1', url='url1'),
Category(name='TestName2', slug='TestName2', url='url2'),
Category(name='TestName3', slug='TestName3', url='url3')
]
for c in categories:
c.save()
writer = Writer(name='Test writer')
writer.save()
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=writer,
article='Hello.'
)
art.save()
for c in categories:
art.categories.add(c)
art.save()
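# Converting the instance should take exactly one query to fetch the
# many-to-many categories.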
with self.assertNumQueries(1):
d = model_to_dict(art)
# Ensure all many-to-many categories appear in model_to_dict
for c in categories:
self.assertIn(c.pk, d['categories'])
# Ensure many-to-many relation appears as a list
self.assertIsInstance(d['categories'], list)
def test_reuse_prefetched(self):
# model_to_dict should not hit the database if it can reuse
# the data populated by prefetch_related.
categories = [
Category(name='TestName1', slug='TestName1', url='url1'),
Category(name='TestName2', slug='TestName2', url='url2'),
Category(name='TestName3', slug='TestName3', url='url3')
]
for c in categories:
c.save()
writer = Writer(name='Test writer')
writer.save()
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=writer,
article='Hello.'
)
art.save()
for c in categories:
art.categories.add(c)
art = Article.objects.prefetch_related('categories').get(pk=art.pk)
with self.assertNumQueries(0):
d = model_to_dict(art)
# Ensure all many-to-many categories appear in model_to_dict
for c in categories:
self.assertIn(c.pk, d['categories'])
# Ensure many-to-many relation appears as a list
self.assertIsInstance(d['categories'], list)
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third test", slug="third-test", url="third")
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_initial_values(self):
self.create_basic_data()
# Initial values can be provided for model forms
f = ArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(self.c1.id), str(self.c2.id)]
})
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
f = RoykoForm(auto_id=False, instance=self.w_royko)
self.assertHTMLEqual(
six.text_type(f),
'''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br />
<span class="helptext">Use both first and last names.</span></td></tr>'''
)
art = Article.objects.create(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
article='Hello.'
)
art_id_1 = art.id
f = ArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
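# Binding new data while passing the instance updates the existing row
# instead of creating a new article.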
f = ArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': six.text_type(self.w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertTrue(f.is_valid())
test_art = f.save()
self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
def test_m2m_initial_callable(self):
"""
Regression for #10349: A callable can be provided as the initial value for an m2m field
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == 'categories':
kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
return db_field.formfield(**kwargs)
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(Article, fields=['headline', 'categories'],
formfield_callback=formfield_for_dbfield)
form = ModelForm()
self.assertHTMLEqual(
form.as_ul(),
"""<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" /></li>
<li><label for="id_categories">Categories:</label>
<select multiple="multiple" name="categories" id="id_categories">
<option value="%d" selected="selected">Entertainment</option>
<option value="%d" selected="selected">It&39;s a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk))
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(
f.errors['slug'],
["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
)
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
def test_multi_fields(self):
self.create_basic_data()
self.maxDiff = None
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
six.text_type(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# Add some categories and test the many-to-many form output.
new_art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = ArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
def test_subset_fields(self):
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'pub_date')
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(
six.text_type(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument in the form's Meta class.
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'slug', 'pub_date')
w_royko = Writer.objects.create(name='Mike Royko')
art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=w_royko)
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>'''
)
self.assertTrue(f.is_valid())
new_art = f.save()
self.assertEqual(new_art.id, art.id)
new_art = Article.objects.get(id=art.id)
self.assertEqual(new_art.headline, 'New headline')
def test_m2m_editing(self):
self.create_basic_data()
form_data = {
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': six.text_type(self.w_royko.pk),
'article': 'Hello.',
'categories': [six.text_type(self.c1.id), six.text_type(self.c2.id)]
}
# Create a new article, with categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
new_art = Article.objects.get(id=new_art.id)
art_id_1 = new_art.id
self.assertQuerysetEqual(new_art.categories.order_by('name'),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
form_data['categories'] = []
f = ArticleForm(form_data, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with no categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
art_id_2 = new_art.id
self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
form_data['categories'] = [six.text_type(self.c1.id), six.text_type(self.c2.id)]
f = ArticleForm(form_data)
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_3 = new_art.id
self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'),
["Entertainment", "It's a test"])
def test_custom_form_fields(self):
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = '__all__'
cat = Category.objects.create(name='Third test')
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
def test_runtime_choicefield_populated(self):
self.maxDiff = None
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
self.create_basic_data()
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
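# Objects created after the form was instantiated still show up when
# the form is rendered again.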
c4 = Category.objects.create(name='Fourth', url='4th')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
class ModelChoiceFieldTests(TestCase):
def setUp(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third", slug="third-test", url="third")
# ModelChoiceField ############################################################
def test_modelchoicefield(self):
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test"),
(self.c3.pk, 'Third')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([['fail']])
with self.assertRaises(ValidationError):
f.clean([{'foo': 'bar'}])
self.assertEqual(f.clean(self.c2.id).name, "It's a test")
self.assertEqual(f.clean(self.c3.id).name, 'Third')
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(f.clean(c4.id).name, 'Fourth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='4th').delete()
with self.assertRaises(ValidationError):
f.clean(c4.id)
def test_modelchoicefield_choices(self):
f = forms.ModelChoiceField(Category.objects.filter(pk=self.c1.id), required=False)
self.assertIsNone(f.clean(''))
self.assertEqual(f.clean(str(self.c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# len can be called on choices
self.assertEqual(len(f.choices), 2)
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Third')
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
self.assertEqual(f.clean(self.c2.id).name, "It's a test")
with self.assertRaises(ValidationError):
f.clean(self.c3.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (self.c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'category Entertainment'),
(self.c2.pk, "category It's a test"),
(self.c3.pk, 'category Third')])
def test_modelchoicefield_11183(self):
"""
Regression test for ticket #11183.
"""
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(Category.objects.all())
form1 = ModelChoiceForm()
field1 = form1.fields['category']
# To allow the widget to change the queryset of field1.widget.choices correctly,
# without affecting other forms, the following must hold:
self.assertIsNot(field1, ModelChoiceForm.base_fields['category'])
self.assertIs(field1.widget.choices.field, field1)
def test_modelchoicefield_22745(self):
"""
#22745 -- Make sure that ModelChoiceField with RadioSelect widget
doesn't produce unnecessary db queries when accessing its BoundField's
attrs.
"""
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(Category.objects.all(), widget=forms.RadioSelect)
form = ModelChoiceForm()
field = form['category'] # BoundField
template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
with self.assertNumQueries(1):
template.render(Context({'field': field}))
class ModelMultipleChoiceFieldTests(TestCase):
def setUp(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third", slug="third-test", url="third")
def test_model_multiple_choice_field(self):
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test"),
(self.c3.pk, 'Third')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([str(self.c1.id), str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False)
self.assertQuerysetEqual(f.clean([self.c1.id, str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False)
self.assertQuerysetEqual(f.clean((self.c1.id, str(self.c2.id))),
["Entertainment", "It's a test"], ordered=False)
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([['fail']])
with self.assertRaises(ValidationError):
f.clean([{'foo': 'bar'}])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
# Note: an id of 1006 is used here since tests that run before this
# one may create categories with primary keys up to 6. Use a number
# that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
def test_model_multiple_choice_required_false(self):
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
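# '0' matches no Category, so these selections are invalid even though
# the field itself is not required.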
with self.assertRaises(ValidationError):
f.clean(['0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c3.id), '0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c1.id), '0'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Third')
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
with self.assertRaises(ValidationError):
f.clean([self.c3.id])
with self.assertRaises(ValidationError):
f.clean([str(self.c2.id), str(self.c3.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(self.c1.pk, 'multicategory Entertainment'),
(self.c2.pk, "multicategory It's a test"),
(self.c3.pk, 'multicategory Third')])
def test_model_multiple_choice_number_of_queries(self):
"""
Test that ModelMultipleChoiceField does O(1) queries instead of
O(n) (#10156).
"""
persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
def test_model_multiple_choice_run_validators(self):
"""
Test that ModelMultipleChoiceField runs the given validators (#14144).
"""
for i in range(30):
Writer.objects.create(name="Person %s" % i)
self._validator_run = False
def my_validator(value):
self._validator_run = True
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(),
validators=[my_validator])
f.clean([p.pk for p in Writer.objects.all()[8:9]])
self.assertTrue(self._validator_run)
def test_model_multiple_choice_show_hidden_initial(self):
"""
Test support of show_hidden_initial by ModelMultipleChoiceField.
"""
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(show_hidden_initial=True,
queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
person2 = Writer.objects.create(name="Person 2")
form = WriterForm(initial={'persons': [person1, person2]},
data={'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person1.pk), str(person2.pk)]})
self.assertTrue(form.is_valid())
self.assertFalse(form.has_changed())
form = WriterForm(initial={'persons': [person1, person2]},
data={'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person2.pk)]})
self.assertTrue(form.is_valid())
self.assertTrue(form.has_changed())
def test_model_multiple_choice_field_22745(self):
"""
#22745 -- Make sure that ModelMultipleChoiceField with
CheckboxSelectMultiple widget doesn't produce unnecessary db queries
when accessing its BoundField's attrs.
"""
class ModelMultipleChoiceForm(forms.Form):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)
form = ModelMultipleChoiceForm()
field = form['categories'] # BoundField
template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
with self.assertNumQueries(1):
template.render(Context({'field': field}))
def test_show_hidden_initial_changed_queries_efficiently(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(
show_hidden_initial=True, queryset=Writer.objects.all())
writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
writer_pks = tuple(x.pk for x in writers)
form = WriterForm(data={'initial-persons': writer_pks})
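# has_changed() should resolve all of the initial pks with one query.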
with self.assertNumQueries(1):
self.assertTrue(form.has_changed())
def test_clean_does_deduplicate_values(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
form = WriterForm(data={})
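# Fifty copies of the same pk should collapse to a single query parameter.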
queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
sql, params = queryset.query.sql_with_params()
self.assertEqual(len(params), 1)
class ModelOneToOneFieldTests(TestCase):
def test_modelform_onetoonefield(self):
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = '__all__'
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = '__all__'
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
def test_modelform_subclassed_model(self):
class BetterWriterForm(forms.ModelForm):
class Meta:
# BetterWriter model is a subclass of Writer with an additional `score` field
model = BetterWriter
fields = '__all__'
bw = BetterWriter.objects.create(name='Joe Better', score=10)
self.assertEqual(sorted(model_to_dict(bw)),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertTrue(form.is_valid())
bw2 = form.save()
self.assertEqual(bw2.score, 12)
def test_onetoonefield(self):
class WriterProfileForm(forms.ModelForm):
class Meta:
# WriterProfile has a OneToOneField to Writer
model = WriterProfile
fields = '__all__'
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
form = WriterProfileForm()
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" /></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
data = {
'writer': six.text_type(self.w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" /></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda",
date_published=datetime.date(1991, 8, 22))
author = Author.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['publication'], None)
author = form.save()
# The author object returned by the form still holds the original
# publication, so re-fetch it from the database to check the saved value.
new_author = Author.objects.get(pk=author.pk)
self.assertEqual(new_author.publication, None)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda",
date_published=datetime.date(1991, 8, 22))
author = Author1.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
def test_clean_false(self):
"""
If the ``clean`` method on a non-required FileField receives False as
the data (meaning clear the field value), it returns False, regardless
of the value of ``initial``.
"""
f = forms.FileField(required=False)
self.assertEqual(f.clean(False), False)
self.assertEqual(f.clean(False, 'initial'), False)
def test_clean_false_required(self):
"""
If the ``clean`` method on a required FileField receives False as the
data, it has the same effect as None: initial is returned if non-empty,
otherwise the validation catches the lack of a required value.
"""
f = forms.FileField(required=True)
self.assertEqual(f.clean(False, 'initial'), 'initial')
with self.assertRaises(ValidationError):
f.clean(False)
def test_full_clear(self):
"""
Integration happy-path test that a model FileField can actually be set
and cleared via a ModelForm.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm()
self.assertIn('name="myfile"', six.text_type(form))
self.assertNotIn('myfile-clear', six.text_type(form))
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
self.assertEqual(doc.myfile.name, 'something.txt')
form = DocumentForm(instance=doc)
self.assertIn('myfile-clear', six.text_type(form))
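# Submitting the clear checkbox empties the FileField on the saved instance.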
form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
doc = form.save(commit=False)
self.assertEqual(bool(doc.myfile), False)
def test_clear_and_file_contradiction(self):
"""
If the user submits a new file upload AND checks the clear checkbox,
they get a validation error, and the bound redisplay of the form still
includes the current file and the clear checkbox.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
form = DocumentForm(instance=doc,
files={'myfile': SimpleUploadedFile('something.txt', b'content')},
data={'myfile-clear': 'true'})
self.assertTrue(not form.is_valid())
self.assertEqual(form.errors['myfile'],
['Please either submit a file or check the clear checkbox, not both.'])
rendered = six.text_type(form)
self.assertIn('something.txt', rendered)
self.assertIn('myfile-clear', rendered)
def test_render_empty_file_field(self):
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
doc = Document.objects.create()
form = DocumentForm(instance=doc)
self.assertEqual(
str(form['myfile']),
'<input id="id_myfile" name="myfile" type="file" />'
)
def test_file_field_data(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertFalse(f.is_valid())
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertFalse(f.is_valid())
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
# If the previous file has been deleted, the file name can be reused
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertFalse(f.is_valid())
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_filefield_required_false(self):
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_custom_file_field_save(self):
"""
Regression for #11149: save_form_data should be called only once
"""
class CFFForm(forms.ModelForm):
class Meta:
model = CustomFF
fields = '__all__'
# It's enough that the form saves without error -- the custom save routine will
# generate an AssertionError if it is called more than once during save.
form = CFFForm(data={'f': None})
form.save()
def test_file_field_multiple_save(self):
"""
Simulate a file upload and check how many times Model.save() gets
called. Test for bug #639.
"""
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = '__all__'
# Grab an image for testing.
filename = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with open(filename, "rb") as fp:
img = fp.read()
# Fake a POST QueryDict and FILES MultiValueDict.
data = {'title': 'Testing'}
files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}
form = PhotoForm(data=data, files=files)
p = form.save()
try:
# Check the savecount stored on the object (see the model).
self.assertEqual(p._savecount, 1)
finally:
# Delete the "uploaded" file to avoid clogging /tmp.
p = Photo.objects.get()
p.image.delete(save=False)
def test_file_path_field_blank(self):
"""
Regression test for #8842: FilePathField(blank=True)
"""
class FPForm(forms.ModelForm):
class Meta:
model = FilePathModel
fields = '__all__'
form = FPForm()
names = [p[1] for p in form['path'].field.choices]
names.sort()
self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'test_uuid.py', 'tests.py'])
@skipUnless(test_images, "Pillow not installed")
def test_image_field(self):
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect
# the image or its width/height properties.
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
class ModelOtherFieldTests(SimpleTestCase):
def test_big_integer_field(self):
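# The 64-bit signed integer bounds are accepted; values one past either
# bound are rejected.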
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
)
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertFalse(bif.is_valid())
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
def test_comma_separated_integer_field(self):
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
fields = '__all__'
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'field': '1'})
f = CommaSeparatedIntegerForm({'field': '12'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'field': '12'})
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
f = CommaSeparatedIntegerForm({'field': '10,32'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'field': '10,32'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
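# Malformed or incomplete URLs fail validation; well-formed URLs,
# including ports and paths, pass.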
self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())
def test_modelform_non_editable_field(self):
"""
When explicitly including a non-editable field in a ModelForm, the
error message should be explicit.
"""
# 'created', non-editable, is excluded by default
self.assertNotIn('created', ArticleForm().fields)
msg = "'created' cannot be specified for Article model form as it is a non-editable field"
with self.assertRaisesMessage(FieldError, msg):
class InvalidArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'created')
def test_http_prefixing(self):
"""
If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)
"""
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
form = HomepageForm({'url': 'example.com'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com')
form = HomepageForm({'url': 'example.com/test'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
def test_media_on_modelform(self):
# As with a regular Form class, you can define custom media to be used
# on the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(
six.text_type(f.media),
'''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>'''
)
def test_choices_type(self):
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_prefetch_related_queryset(self):
"""
ModelChoiceField should respect a prefetch_related() on its queryset.
"""
blue = Colour.objects.create(name='blue')
red = Colour.objects.create(name='red')
multicolor_item = ColourfulItem.objects.create()
multicolor_item.colours.add(blue, red)
red_item = ColourfulItem.objects.create()
red_item.colours.add(red)
class ColorModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return ', '.join(c.name for c in obj.colours.all())
field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
with self.assertNumQueries(4): # would be 5 if prefetch is ignored
self.assertEqual(tuple(field.choices), (
('', '---------'),
(multicolor_item.pk, 'blue, red'),
(red_item.pk, 'red'),
))
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields),
['description', 'url'])
self.assertHTMLEqual(
six.text_type(CategoryForm()),
'''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>'''
)
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
['name'])
self.assertHTMLEqual(
six.text_type(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>'''
)
def test_iterable_model_m2m(self):
class ColourfulItemForm(forms.ModelForm):
class Meta:
model = ColourfulItem
fields = '__all__'
colour = Colour.objects.create(name='Blue')
form = ColourfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" /></p>
<p><label for="id_colours">Colours:</label> <select multiple="multiple" name="colours" id="id_colours">
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
% {'blue_pk': colour.pk})
def test_callable_field_default(self):
class PublicationDefaultsForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = '__all__'
self.maxDiff = 2000
form = PublicationDefaultsForm()
today_str = str(datetime.date.today())
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_title">Title:</label> <input id="id_title" maxlength="30" name="title" type="text" /></p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}" />
<input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}" /></p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected="selected">direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di" /></p>
<p><label for="id_category">Category:</label> <select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected="selected">Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden" value="3" />
""".format(today_str)
)
empty_data = {
'title': '',
'date_published': today_str,
'initial-date_published': today_str,
'mode': 'di',
'initial-mode': 'di',
'category': '3',
'initial-category': '3',
}
bound_form = PublicationDefaultsForm(empty_data)
self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
def test_custom_error_messages(self):
data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors['name1']),
'<ul class="errorlist"><li>Form custom error message.</li></ul>'
)
self.assertHTMLEqual(
str(errors['name2']),
'<ul class="errorlist"><li>Model custom error message.</li></ul>'
)
def test_model_clean_error_messages(self):
data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
)
data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
)
data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
def test_override_clean(self):
"""
Regression for #12596: Calling super from ModelForm.clean() should be
optional.
"""
class TripleFormWithCleanOverride(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
def clean(self):
if not self.cleaned_data['left'] == self.cleaned_data['right']:
raise forms.ValidationError('Left and right should be equal')
return self.cleaned_data
form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
self.assertTrue(form.is_valid())
# form.instance.left will be None if the instance was not constructed
# by form.full_clean().
self.assertEqual(form.instance.left, 1)
def test_model_form_clean_applies_to_model(self):
"""
Regression test for #12960. Make sure the cleaned_data returned from
ModelForm.clean() is applied to the model instance.
"""
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
def clean(self):
self.cleaned_data['name'] = self.cleaned_data['name'].upper()
return self.cleaned_data
data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
form = CategoryForm(data)
category = form.save()
self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
def test_form_subclass_inheritance(self):
class Form(forms.Form):
age = forms.IntegerField()
class ModelForm(forms.ModelForm, Form):
class Meta:
model = Writer
fields = '__all__'
self.assertEqual(list(ModelForm().fields.keys()), ['name', 'age'])
def test_field_removal(self):
class ModelForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class Mixin(object):
age = None
class Form(forms.Form):
age = forms.IntegerField()
class Form2(forms.Form):
foo = forms.IntegerField()
self.assertEqual(list(ModelForm().fields.keys()), ['name'])
self.assertEqual(list(type(str('NewForm'), (Mixin, Form), {})().fields.keys()), [])
self.assertEqual(list(type(str('NewForm'), (Form2, Mixin, Form), {})().fields.keys()), ['foo'])
self.assertEqual(list(type(str('NewForm'), (Mixin, ModelForm, Form), {})().fields.keys()), ['name'])
self.assertEqual(list(type(str('NewForm'), (ModelForm, Mixin, Form), {})().fields.keys()), ['name'])
self.assertEqual(list(type(str('NewForm'), (ModelForm, Form, Mixin), {})().fields.keys()), ['name', 'age'])
self.assertEqual(list(type(str('NewForm'), (ModelForm, Form), {'age': None})().fields.keys()), ['name'])
def test_field_removal_name_clashes(self):
"""Regression test for https://code.djangoproject.com/ticket/22510."""
class MyForm(forms.ModelForm):
media = forms.CharField()
class Meta:
model = Writer
fields = '__all__'
class SubForm(MyForm):
media = None
self.assertIn('media', MyForm().fields)
self.assertNotIn('media', SubForm().fields)
self.assertTrue(hasattr(MyForm, 'media'))
self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
class Meta:
model = StumpJoke
fields = '__all__'
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
class Meta:
model = StumpJoke
fields = () # We don't need any fields from the model
class LimitChoicesToTest(TestCase):
"""
Tests the functionality of ``limit_choices_to``.
"""
def setUp(self):
self.threepwood = Character.objects.create(
username='threepwood',
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
self.marley = Character.objects.create(
username='marley',
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
def test_limit_choices_to_callable_for_fk_rel(self):
"""
A ForeignKey relation can use ``limit_choices_to`` as a callable, re #2554.
"""
stumpjokeform = StumpJokeForm()
self.assertIn(self.threepwood, stumpjokeform.fields['most_recently_fooled'].queryset)
self.assertNotIn(self.marley, stumpjokeform.fields['most_recently_fooled'].queryset)
def test_limit_choices_to_callable_for_m2m_rel(self):
"""
A ManyToMany relation can use ``limit_choices_to`` as a callable, re #2554.
"""
stumpjokeform = StumpJokeForm()
self.assertIn(self.threepwood, stumpjokeform.fields['has_fooled_today'].queryset)
self.assertNotIn(self.marley, stumpjokeform.fields['has_fooled_today'].queryset)
def test_custom_field_with_queryset_but_no_limit_choices_to(self):
"""
Regression test for #23795: Make sure a custom field with a `queryset`
attribute but no `limit_choices_to` still works.
"""
f = StumpJokeWithCustomFieldForm()
self.assertEqual(f.fields['custom'].queryset, 42)
class FormFieldCallbackTests(SimpleTestCase):
def test_baseform_with_widgets_in_meta(self):
"""Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
Form = modelform_factory(Person, form=BaseForm)
self.assertIs(Form.base_fields['name'].widget, widget)
def test_factory_with_widget_argument(self):
""" Regression for #15315: modelform_factory should accept widgets
argument
"""
widget = forms.Textarea()
        # Without a widgets argument, modelform_factory should not set the widget to Textarea
Form = modelform_factory(Person, fields="__all__")
self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widgets argument, modelform_factory should set the widget to Textarea
Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
def test_modelform_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelform_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelform_factory(Person)
def test_modelform_factory_with_all_fields(self):
""" Regression for #19733 """
form = modelform_factory(Person, fields="__all__")
self.assertEqual(list(form.base_fields), ["name"])
def test_custom_callback(self):
"""Test that a custom formfield_callback is used if provided"""
callback_args = []
def callback(db_field, **kwargs):
callback_args.append((db_field, kwargs))
return db_field.formfield(**kwargs)
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
modelform_factory(Person, form=BaseForm, formfield_callback=callback)
id_field, name_field = Person._meta.fields
self.assertEqual(callback_args,
[(id_field, {}), (name_field, {'widget': widget})])
def test_bad_callback(self):
# A bad callback provided by user still gives an error
with self.assertRaises(TypeError):
modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')
class LocalizedModelFormTest(TestCase):
def test_model_form_applies_localize_to_some_fields(self):
class PartiallyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = ('left', 'right',)
fields = '__all__'
f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertFalse(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_applies_localize_to_all_fields(self):
class FullyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = '__all__'
fields = '__all__'
f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertTrue(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_refuses_arbitrary_string(self):
with self.assertRaises(TypeError):
class BrokenLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super(CustomMetaclass, cls).__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
class CustomMetaclassForm(six.with_metaclass(CustomMetaclass, forms.ModelForm)):
pass
class CustomMetaclassTestCase(SimpleTestCase):
def test_modelform_factory_metaclass(self):
new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(TestCase):
"""
Should a model do anything special with __setattr__() or descriptors which
raise a ValidationError, a model form should catch the error (#24706).
"""
def test_setattr_raises_validation_error_field_specific(self):
"""
A model ValidationError using the dict form should put the error
message into the correct key of form.errors.
"""
form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'title': ['Cannot set attribute', 'This field cannot be blank.']
})
def test_setattr_raises_validation_error_non_field(self):
"""
A model ValidationError not using the dict form should put the error
message into __all__ (i.e. non-field errors) on the form.
"""
form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'__all__': ['Cannot set attribute'],
'title': ['This field cannot be blank.']
})
| {
"content_hash": "dd899fe53becf62f6f79873c8e55b268",
"timestamp": "",
"source": "github",
"line_count": 2827,
"max_line_length": 120,
"avg_line_length": 40.77325787053414,
"alnum_prop": 0.5962989953672375,
"repo_name": "Endika/django",
"id": "a2e7c664192c505848e302185fde72c885a55ba3",
"size": "115266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/model_forms/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11459348"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 0);
| {
"content_hash": "559fef244ca36f25c7e5075119932c8d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 165,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.7056603773584905,
"repo_name": "antoinecarme/pyaf",
"id": "7d8b2e93827c5fe3fb08a1513dd2b3de139a7366",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_5/ar_/test_artificial_128_Anscombe_LinearTrend_5__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import os
import GafferUI
bookmarks = GafferUI.Bookmarks.acquire( application )
bookmarks.setDefault( os.getcwd() )
bookmarks.add( "Home", os.path.expandvars( "$HOME" ) )
bookmarks.add( "Desktop", os.path.expandvars( "$HOME/Desktop" ) )
fontBookmarks = GafferUI.Bookmarks.acquire( application, category="font" )
fontBookmarks.add( "Gaffer Fonts", os.path.expandvars( "$GAFFER_ROOT/fonts" ) )
shaderBookmarks = GafferUI.Bookmarks.acquire( application, category="shader" )
defaultShaderDirectory = os.path.expandvars( "$HOME/gaffer/shaders" )
try :
os.makedirs( defaultShaderDirectory )
except OSError :
# makedirs very unhelpfully raises an exception if
# the directory already exists, but it might also
# raise if it fails. we reraise only in the latter case.
if not os.path.isdir( defaultShaderDirectory ) :
raise
shaderBookmarks.setDefault( defaultShaderDirectory )
| {
"content_hash": "2d59261f00c8e26b30e452eb6835584b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 38.21739130434783,
"alnum_prop": 0.7656427758816837,
"repo_name": "chippey/gaffer",
"id": "e999092be3749204a8f88127039d21dedd9ae161",
"size": "2682",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "startup/gui/bookmarks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2258"
},
{
"name": "C++",
"bytes": "5420141"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Objective-C",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "5348174"
},
{
"name": "Shell",
"bytes": "8370"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
} |
"""First simple sklearn classifier"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import argparse
import os
import learn1
from matplotlib import pyplot as plt
import Levenshtein # via https://pypi.python.org/pypi/python-Levenshtein/
from collections import Counter
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Simple visualisations of the test/train data')
parser.add_argument('table', help='Name of in and out of class data to read (e.g. scikit_testtrain_apple)')
args = parser.parse_args()
data_dir = "data"
in_class_name = os.path.join(data_dir, args.table + '_in_class.csv')
out_class_name = os.path.join(data_dir, args.table + '_out_class.csv')
in_class_lines = learn1.reader(in_class_name)
out_class_lines = learn1.reader(out_class_name)
if True:
# investigate most frequently repeated tweets in each class
c_in = Counter(in_class_lines)
c_out = Counter(out_class_lines)
# some hard-coded display routines for playing with the data...
if False:
plt.figure()
plt.ion()
if False: # histogram of tweet lengths
lengths_in_class = [len(s) for s in in_class_lines]
lengths_out_class = [len(s) for s in out_class_lines]
plt.title("Histogram of tweet lengths for classes in " + args.table)
plt.xlabel("Bins of tweet lengths")
plt.ylabel("Counts")
tweet_lengths = (0, 140)
filename_pattern = "histogram_tweet_lengths_{}.png"
# note - tried counting spaces with s.count(" ") but this seems to mirror
# tweet-length
if True: # counting number of capital letters
lengths_in_class = [Levenshtein.hamming(s, s.lower()) for s in in_class_lines]
lengths_out_class = [Levenshtein.hamming(s, s.lower()) for s in out_class_lines]
plt.title("Histogram of number of capitals for classes in " + args.table)
tweet_lengths = (0, 40)
filename_pattern = "nbr_capitals_{}.png"
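        # Illustrative note (not part of the original script): Levenshtein.hamming
        # compares two equal-length strings position by position, so
        # hamming(s, s.lower()) counts the characters that change under
        # lower-casing, i.e. the number of upper-case letters.
        # For example, Levenshtein.hamming("AbC", "abc") == 2.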
plt.hist(lengths_in_class, range=tweet_lengths, color="blue", label="in-class", histtype="step")
plt.hist(lengths_out_class, range=tweet_lengths, color="green", label="out-class", histtype="step")
UPPER_LEFT = 2
plt.legend(loc=UPPER_LEFT)
plt.savefig(filename_pattern.format(args.table))
| {
"content_hash": "dd8025788cac8c22d65a1860a72d7061",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 111,
"avg_line_length": 49.092592592592595,
"alnum_prop": 0.6529611467370804,
"repo_name": "ianozsvald/social_media_brand_disambiguator",
"id": "ae2c2cff993a55bd7984b0a26d6c88f36b31f16d",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualisations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89560"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from ...command import SubCommand
from ...wsgi import WSGIApplication
from ...console import Cell
import sys
class Libs(SubCommand):
"""List libraries installed in the project"""
help = "get library information"
def add_arguments(self, parser):
parser.add_argument(
"-l",
"--location",
dest="location",
default=None,
metavar="PATH",
help="location of the Moya server code",
)
parser.add_argument(
"-i",
"--ini",
dest="settings",
default=None,
metavar="SETTINGSPATH",
help="path to projects settings file",
)
parser.add_argument(
"--org",
dest="org",
default=None,
metavar="ORGANIZATION",
help="show only libraries with from a specific organization",
)
parser.add_argument(
"-f",
"--freeze",
dest="freeze",
action="store_true",
help="output project library requirements",
)
return parser
def run(self):
args = self.args
application = WSGIApplication(
self.location,
self.get_settings(),
disable_autoreload=True,
master_settings=self.master_settings,
)
archive = application.archive
table = []
if args.org:
prefix = args.org.lstrip(".") + "."
else:
prefix = None
libs = sorted(archive.libs.values(), key=lambda lib: lib.long_name)
if prefix is not None:
libs = [lib for lib in libs if lib.long_name.startswith(prefix)]
if args.freeze:
lib_freeze = (
"\n".join("{}=={}".format(lib.long_name, lib.version) for lib in libs)
+ "\n"
)
sys.stdout.write(lib_freeze)
return 0
for lib in libs:
name = lib.long_name
if prefix is not None and not name.startswith(prefix):
continue
table.append(
[
name,
Cell(lib.version, bold=True, fg="magenta"),
Cell(lib.install_location, bold=True, fg="blue"),
]
)
self.console.table(table, header_row=["lib", "version", "location"])
| {
"content_hash": "0dd12027d827b5fbb73d6938b015d1da",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 86,
"avg_line_length": 28.885057471264368,
"alnum_prop": 0.4998010346199761,
"repo_name": "moyaproject/moya",
"id": "d4c04946ddec82b20b426d547a7f019e73314408",
"size": "2513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moya/command/sub/libs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "662"
},
{
"name": "CSS",
"bytes": "98490"
},
{
"name": "Genshi",
"bytes": "949"
},
{
"name": "HTML",
"bytes": "14279826"
},
{
"name": "JavaScript",
"bytes": "369773"
},
{
"name": "Myghty",
"bytes": "774"
},
{
"name": "Python",
"bytes": "1828220"
},
{
"name": "Shell",
"bytes": "165"
},
{
"name": "Smalltalk",
"bytes": "154"
}
],
"symlink_target": ""
} |
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework_jwt.views import refresh_jwt_token
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/refresh_token/', refresh_jwt_token),
url(r'^api/login/', include('rest_social_auth.urls_jwt')),
url(r'^api/bank_accounts/', include('bank_accounts.urls',
namespace='bank-accounts-api')),
]
| {
"content_hash": "5e2cec616ac5091cd11a033cfb0d83de",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 39.96296296296296,
"alnum_prop": 0.6746987951807228,
"repo_name": "krayevidi/IBANapp",
"id": "960391b616df3bc9c5b2053d04603ad616202792",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/backend/backend/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115"
},
{
"name": "HTML",
"bytes": "11492"
},
{
"name": "JavaScript",
"bytes": "28229"
},
{
"name": "Python",
"bytes": "20902"
}
],
"symlink_target": ""
} |
"""
Coin Toss Odds Ratio
--------------------
Figure 5.1
Odds ratio for two models, :math:`O_{21}`, describing coin tosses (eq. 5.26).
Out of N tosses (left: N = 10; right: N = 20), k tosses are heads. Model 2 is
a one-parameter model with the heads probability determined from data
(:math:`b^0 = k/N`), and model 1 claims an a priori known heads probability
equal to :math:`b_*`.The results are shown for two values of :math:`b_*`,
as indicated in the legend. Note that the odds ratio is minimized and below 1
(model 1 wins) when :math:`k = b_* N`.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import integrate
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
@np.vectorize
def odds_ratio(n, k, bstar):
"""Odds ratio between M_2, where the heads probability is unknown,
and M_1, where the heads probability is known to be `bstar`, evaluated
in the case of `k` heads observed in `n` tosses.
Eqn. 5.25 in the text
"""
factor = 1. / (bstar ** k * (1 - bstar) ** (n - k))
f = lambda b: b ** k * (1 - b) ** (n - k)
return factor * integrate.quad(f, 0, 1)[0]
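# Illustrative sanity check (not part of the original figure script): the odds
# ratio O_21 is minimised (and drops below 1, favouring the fixed-b* model)
# when the observed heads fraction k/N equals b*, and grows when it does not.
assert odds_ratio(10, 5, 0.5) < 1   # k = b*N: the simpler model 1 is favoured
assert odds_ratio(10, 9, 0.5) > 1   # k far from b*N: the free-b model 2 is favoured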
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.13, right=0.95, wspace=0.05, bottom=0.15)
subplots = [121, 122]
n_array = [10, 20]
linestyles = ['-k', '--b']
bstar_array = [0.5, 0.1]
for subplot, n in zip(subplots, n_array):
ax = fig.add_subplot(subplot, yscale='log')
k = np.arange(n + 1)
# plot curves for two values of bstar
for ls, bstar in zip(linestyles, bstar_array):
ax.plot(k, odds_ratio(n, k, bstar), ls,
label=r'$b^* = %.1f$' % bstar)
if subplot == 121:
ax.set_xlim(0, n - 0.01)
ax.set_ylabel(r'$O_{21}$')
ax.legend(loc=2)
else:
ax.set_xlim(0, n)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel('$k$')
ax.set_title('$n = %i$' % n)
ax.set_ylim(8E-2, 1E3)
ax.xaxis.set_major_locator(plt.MultipleLocator(n / 5))
ax.grid()
plt.show()
| {
"content_hash": "25e2db5c3159642a544fc35e9c7484ce",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 34.25609756097561,
"alnum_prop": 0.6169455322178711,
"repo_name": "nhuntwalker/astroML",
"id": "701ba2849796d9e6132d1a7b9ec07ff5c0537335",
"size": "2809",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "book_figures/chapter5/fig_odds_ratio_coin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "696"
},
{
"name": "Python",
"bytes": "1084793"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
def _plot_item(W, name, full_name, nspaces):
plt.figure()
if W.shape == ():
print(name, ": ", W)
elif W.shape[0] == 1:
plt.stem(W.T)
plt.title(full_name)
elif W.shape[1] == 1:
plt.stem(W)
plt.title(full_name)
else:
plt.imshow(np.abs(W), interpolation='nearest', cmap='jet');
plt.colorbar()
plt.title(full_name)
def all_plot(d, full_name="", exclude="", nspaces=0):
"""Recursively plot all the LFADS model parameters in the nested
dictionary."""
for k, v in d.iteritems():
this_name = full_name+"/"+k
if isinstance(v, dict):
all_plot(v, full_name=this_name, exclude=exclude, nspaces=nspaces+4)
else:
if exclude == "" or exclude not in this_name:
_plot_item(v, name=k, full_name=full_name+"/"+k, nspaces=nspaces+4)
def plot_priors():
g0s_prior_mean_bxn = train_modelvals['prior_g0_mean']
g0s_prior_var_bxn = train_modelvals['prior_g0_var']
g0s_post_mean_bxn = train_modelvals['posterior_g0_mean']
g0s_post_var_bxn = train_modelvals['posterior_g0_var']
plt.figure(figsize=(10,4), tight_layout=True);
plt.subplot(1,2,1)
plt.hist(g0s_post_mean_bxn.flatten(), bins=20, color='b');
plt.hist(g0s_prior_mean_bxn.flatten(), bins=20, color='g');
plt.title('Histogram of Prior/Posterior Mean Values')
plt.subplot(1,2,2)
plt.hist((g0s_post_var_bxn.flatten()), bins=20, color='b');
plt.hist((g0s_prior_var_bxn.flatten()), bins=20, color='g');
plt.title('Histogram of Prior/Posterior Log Variance Values')
plt.figure(figsize=(10,10), tight_layout=True)
plt.subplot(2,2,1)
plt.imshow(g0s_prior_mean_bxn.T, interpolation='nearest', cmap='jet')
plt.colorbar(fraction=0.025, pad=0.04)
plt.title('Prior g0 means')
plt.subplot(2,2,2)
plt.imshow(g0s_post_mean_bxn.T, interpolation='nearest', cmap='jet')
plt.colorbar(fraction=0.025, pad=0.04)
plt.title('Posterior g0 means');
plt.subplot(2,2,3)
plt.imshow(g0s_prior_var_bxn.T, interpolation='nearest', cmap='jet')
plt.colorbar(fraction=0.025, pad=0.04)
plt.title('Prior g0 variance Values')
plt.subplot(2,2,4)
plt.imshow(g0s_post_var_bxn.T, interpolation='nearest', cmap='jet')
plt.colorbar(fraction=0.025, pad=0.04)
plt.title('Posterior g0 variance Values')
plt.figure(figsize=(10,5))
plt.stem(np.sort(np.log(g0s_post_mean_bxn.std(axis=0))));
plt.title('Log standard deviation of h0 means');
def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0,
color='r', title=None):
if bidx is None:
vals_txn = np.mean(vals_bxtxn, axis=0)
else:
vals_txn = vals_bxtxn[bidx,:,:]
T, N = vals_txn.shape
if n_to_plot > N:
n_to_plot = N
plt.plot(vals_txn[:,0:n_to_plot] + scale*np.array(range(n_to_plot)),
color=color, lw=1.0)
plt.axis('tight')
if title:
plt.title(title)
def plot_lfads_timeseries(data_bxtxn, model_vals, ext_input_bxtxi=None,
truth_bxtxn=None, bidx=None, output_dist="poisson",
conversion_factor=1.0, subplot_cidx=0,
col_title=None):
n_to_plot = 10
scale = 1.0
nrows = 7
plt.subplot(nrows,2,1+subplot_cidx)
if output_dist == 'poisson':
rates = means = conversion_factor * model_vals['output_dist_params']
plot_time_series(rates, bidx, n_to_plot=n_to_plot, scale=scale,
title=col_title + " rates (LFADS - red, Truth - black)")
elif output_dist == 'gaussian':
means_vars = model_vals['output_dist_params']
means, vars = np.split(means_vars,2, axis=2) # bxtxn
stds = np.sqrt(vars)
plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=scale,
title=col_title + " means (LFADS - red, Truth - black)")
plot_time_series(means+stds, bidx, n_to_plot=n_to_plot, scale=scale,
color='c')
plot_time_series(means-stds, bidx, n_to_plot=n_to_plot, scale=scale,
color='c')
else:
assert 'NIY'
if truth_bxtxn is not None:
plot_time_series(truth_bxtxn, bidx, n_to_plot=n_to_plot, color='k',
scale=scale)
input_title = ""
if "controller_outputs" in model_vals.keys():
input_title += " Controller Output"
plt.subplot(nrows,2,3+subplot_cidx)
u_t = model_vals['controller_outputs'][0:-1]
plot_time_series(u_t, bidx, n_to_plot=n_to_plot, color='c', scale=1.0,
title=col_title + input_title)
if ext_input_bxtxi is not None:
input_title += " External Input"
plot_time_series(ext_input_bxtxi, n_to_plot=n_to_plot, color='b',
scale=scale, title=col_title + input_title)
plt.subplot(nrows,2,5+subplot_cidx)
plot_time_series(means, bidx,
n_to_plot=n_to_plot, scale=1.0,
title=col_title + " Spikes (LFADS - red, Spikes - black)")
plot_time_series(data_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=1.0)
plt.subplot(nrows,2,7+subplot_cidx)
plot_time_series(model_vals['factors'], bidx, n_to_plot=n_to_plot, color='b',
scale=2.0, title=col_title + " Factors")
plt.subplot(nrows,2,9+subplot_cidx)
plot_time_series(model_vals['gen_states'], bidx, n_to_plot=n_to_plot,
color='g', scale=1.0, title=col_title + " Generator State")
if bidx is not None:
data_nxt = data_bxtxn[bidx,:,:].T
params_nxt = model_vals['output_dist_params'][bidx,:,:].T
else:
data_nxt = np.mean(data_bxtxn, axis=0).T
params_nxt = np.mean(model_vals['output_dist_params'], axis=0).T
if output_dist == 'poisson':
means_nxt = params_nxt
elif output_dist == 'gaussian': # (means+vars) x time
means_nxt = np.vsplit(params_nxt,2)[0] # get means
else:
assert "NIY"
plt.subplot(nrows,2,11+subplot_cidx)
plt.imshow(data_nxt, aspect='auto', interpolation='nearest')
plt.title(col_title + ' Data')
plt.subplot(nrows,2,13+subplot_cidx)
plt.imshow(means_nxt, aspect='auto', interpolation='nearest')
plt.title(col_title + ' Means')
def plot_lfads(train_bxtxd, train_model_vals,
train_ext_input_bxtxi=None, train_truth_bxtxd=None,
valid_bxtxd=None, valid_model_vals=None,
valid_ext_input_bxtxi=None, valid_truth_bxtxd=None,
bidx=None, cf=1.0, output_dist='poisson'):
# Plotting
f = plt.figure(figsize=(18,20), tight_layout=True)
plot_lfads_timeseries(train_bxtxd, train_model_vals,
train_ext_input_bxtxi,
truth_bxtxn=train_truth_bxtxd,
conversion_factor=cf, bidx=bidx,
output_dist=output_dist, col_title='Train')
plot_lfads_timeseries(valid_bxtxd, valid_model_vals,
valid_ext_input_bxtxi,
truth_bxtxn=valid_truth_bxtxd,
conversion_factor=cf, bidx=bidx,
output_dist=output_dist,
subplot_cidx=1, col_title='Valid')
  # Convert from figure to a numpy array width x height x 3 (last for RGB)
f.canvas.draw()
data = np.fromstring(f.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data_wxhx3 = data.reshape(f.canvas.get_width_height()[::-1] + (3,))
plt.close()
return data_wxhx3
| {
"content_hash": "f7bdaf7d52487b94cff131e23b5a9934",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 35.89903846153846,
"alnum_prop": 0.6215347529128163,
"repo_name": "unnikrishnankgs/va",
"id": "b4ebba9f489b38de4b4f1dd69bcae45206c9fbf6",
"size": "8146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/tensorflow/models/lfads/plot_lfads.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1836035"
},
{
"name": "C++",
"bytes": "12002305"
},
{
"name": "CMake",
"bytes": "128"
},
{
"name": "CSS",
"bytes": "64776"
},
{
"name": "Cuda",
"bytes": "78890"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "297329"
},
{
"name": "JavaScript",
"bytes": "4313047"
},
{
"name": "Jupyter Notebook",
"bytes": "603900"
},
{
"name": "Makefile",
"bytes": "7573"
},
{
"name": "Nginx",
"bytes": "544"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "PureBasic",
"bytes": "134"
},
{
"name": "Python",
"bytes": "51104955"
},
{
"name": "Shell",
"bytes": "71646"
},
{
"name": "Smarty",
"bytes": "28890"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from employee import views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'hrms.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.dashboard.as_view(), name='dashboard'),
url(r'^requisition$', views.requisition.as_view(), name='requisition'),
url(r'^candidates$', views.candidates.as_view(), name='candidates'),
url(r'^intw-schedules$', views.intw_schedules.as_view(), name='candidates'),
url(r'^employees$', views.employees.as_view(), name='employees'),
url(r'^leave$', views.leave.as_view(), name='leave'),
url(r'^apply_leave$', views.apply_leave.as_view(), name='apply_leave'),
)
| {
"content_hash": "e4fdc6e027d5da6c6ff1bf2dab35ad41",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 45.375,
"alnum_prop": 0.6363636363636364,
"repo_name": "vjega/pyhrms",
"id": "17b1e928b7b2a9f93f8b8da900a409279c15837e",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "employee/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "436612"
},
{
"name": "Python",
"bytes": "15856"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from servicerating.models import (
Contact, Conversation, Response, UserAccount, Extra)
from control.actions import export_select_fields_csv_action
class ContactAdmin(admin.ModelAdmin):
actions = [export_select_fields_csv_action(
"Export selected objects as CSV file",
fields=[
("contact", "Contact"),
("key", "Key"),
("value", "Value"),
("msisdn", "MSISDN"),
("created_at", "Created At"),
("updated_at", "Updated At"),
],
header=True
)]
class ConversationAdmin(admin.ModelAdmin):
actions = [export_select_fields_csv_action(
"Export selected objects as CSV file",
fields=[
("user_account", "User Account"),
("key", "Key"),
("name", "Name"),
("notes", "Notes"),
("created_at", "Created At"),
("updated_at", "Updated At"),
],
header=True
)]
class UserAccountAdmin(admin.ModelAdmin):
actions = [export_select_fields_csv_action(
"Export selected objects as CSV file",
fields=[
("key", "Key"),
("name", "Name"),
("notes", "Notes"),
("created_at", "Created At"),
("updated_at", "Updated At"),
],
header=True
)]
class ExtraAdmin(admin.ModelAdmin):
actions = [export_select_fields_csv_action(
"Export selected objects as CSV file",
fields=[
("contact", "Contact"),
("key", "Key"),
("value", "Value"),
("created_at", "Created At"),
("updated_at", "Updated At"),
],
header=True
)]
admin.site.register(Contact, ContactAdmin)
admin.site.register(Conversation, ConversationAdmin)
admin.site.register(Response)
admin.site.register(UserAccount, UserAccountAdmin)
admin.site.register(Extra, ExtraAdmin)
| {
"content_hash": "a3a54b49cb99cab82d6220c7f9ca2065",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 59,
"avg_line_length": 28.3768115942029,
"alnum_prop": 0.54902962206333,
"repo_name": "praekelt/ndoh-control",
"id": "e6c2847d852031e079bedd7999d089bde4bef853",
"size": "1958",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "servicerating/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19562"
},
{
"name": "HTML",
"bytes": "32320"
},
{
"name": "JavaScript",
"bytes": "65518"
},
{
"name": "Nginx",
"bytes": "777"
},
{
"name": "Python",
"bytes": "553807"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
} |
def get_lastday_visit_voluem():
pass
def get_last7day_visit_tendency_img():
    pass
| {
"content_hash": "44faf7abd93bd6460dfc1c966475b28a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 15,
"alnum_prop": 0.6888888888888889,
"repo_name": "lwz7512/logtoeye",
"id": "3d42268e4d6169af3442e4b7fa6af757ee705afe",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26437"
},
{
"name": "JavaScript",
"bytes": "166295"
},
{
"name": "Python",
"bytes": "57613"
}
],
"symlink_target": ""
} |
"""
Temporal propagation of the component: Take a component at birth time
(time 0) and propagate it for its age t to the current position.
trace_epicyclic_orbit traces a 6D point
trace_epicyclic_covmatrix traces covariance matrix
"""
import numpy as np
def convert_cart2curvilin(data, ro=8., vo=220.):
"""
Converts cartesian coordinates XYZUVW (given with respect to the
LSR) to the curvilinear system.
Curvilinear system is corotating so its radial component xi is
always pointing towards the galactic center.
Coordinates in the curvilinear system are [xi, eta, zeta, xidot, etadot, zetadot].
Parameters
----------
data: [6, (npoints)] float np.array
[pc, pc, pc, km/s,km/s,km/s] --> TODO: Change this to pc/Myr
[X, Y, Z, U, V, W]
Returns
-------
curvilin_coord: [6, (npoints)] float np.array
xi : radial distance from the origin in LSR
eta :
zeta : vertical distance from plane
xidot :
etadot :
zetadot:
"""
#~ data = np.array(data)
X, Y, Z, U, V, W = data.T
# TODO: to this conversion at the upper level
R0 = ro*1000.0 # pc
Omega0 = vo/R0 # km/s / pc # This could also be done earlier
# Place the velocities in a rotating frame
U = U - Y*Omega0
V = V + X*Omega0
R = np.sqrt(Y**2 + (R0-X)**2)
phi = np.arctan2(Y, R0-X)
xi = R0-R
eta = phi*R0
zeta = Z
xidot = U*np.cos(phi) - V*np.sin(phi)
etadot = R0/R * (V*np.cos(phi) + U*np.sin(phi))
zetadot = W
curvilin_coord = np.array([xi, eta, zeta, xidot, etadot, zetadot])
return curvilin_coord.T
def convert_curvilin2cart(data, ro=8., vo=220.):
"""
Converts curvilinear coordinates [xi, eta, zeta, xidot, etadot,
zetadot] to cartesian coordinates XYZUVW (given with respect to the
LSR).
Curvilinear system is corotating so its radial component xi is
always pointing towards the galactic center.
Parameters
----------
data: [6, (npoints)] float np.array
[pc, pc, pc, km/s,km/s,km/s] --> TODO: Change this to pc/Myr
[xi, eta, zeta, xidot, etadot, zetadot]
Returns
-------
cart_coordinates: [6, (npoints)] float np.array
[pc, pc, pc, km/s,km/s,km/s] --> TODO: Change this to pc/Myr
[X, Y, Z, U, V, W]
"""
xi, eta, zeta, xidot, etadot, zetadot = data.T
# Todo: compute this at the upper level
R0 = ro*1000.0
R = R0 - xi
phi = eta/R0
X = xi*np.cos(phi) + R0*(1.0-np.cos(phi)) #R0 - R*np.cos(phi)
Y = R*np.sin(phi)
Z = zeta
U = xidot*np.cos(phi) + R/R0*etadot*np.sin(phi)
V = - xidot*np.sin(phi) + R/R0*etadot*np.cos(phi)
W = zetadot
# Convert to a non-rotating observed frame
Omega0 = vo/R0 # km/s / pc
U = U + Y*Omega0
V = V - X*Omega0
cart_coordinates = np.array([X, Y, Z, U, V, W])
return cart_coordinates.T
def epicyclic_approx(data, times=None, sA=0.89, sB=1.15, sR=1.21):
"""
Epicyclic approximation following the Makarov et al. 2004 paper
in the curvilinear coordinate system:
The radial component xi is pointing towards the Galactic center
at all times and equals 0 at R0.
The circular component eta circles around the Galaxy; eta = phi*R.
The vertical component is defined as a displacement from the Galactic plane.
This approximation works close to the LSR.
Parameters
------------
    data : [pc, pc, pc, km/s, km/s, km/s] # *parsecs in the eta component are scaled parsecs...
xi, eta, zeta, xidot, etadot, zetadot
"""
xi0, eta0, zeta0, xidot0, etadot0, zetadot0 = data.T
# Bovy 2017
#~ A0 = 15.3 # km/s/kpc
#~ B0 = -11.9 # km/s/kpc
# Unit conversion: convert from km/s/kpc to Myr-1
#~ A = A0 * 0.0010227121650537077 # Myr-1
#~ B = B0 * 0.0010227121650537077 # Myr-1
# Bovy 2017, converted from km/s/kpc to Myr-1
# TODO: Put this (both Oort's constants and the scaling factors) on the input params list. This is where all the conversions should be done, too.
A0 = 0.01564749613 # 15.3 km/s/kpc * 0.0010227121650537077 = 0.01564749613 Myr-1
B0 = -0.01217027476 # -11.9 km/s/kpc * 0.0010227121650537077 = -0.01217027476 Myr-1
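    # (Illustrative unit check, not in the original code: 1 km/s ~= 1.0227 pc/Myr,
    # so 1 km/s/kpc = 1.0227 pc/Myr per 1000 pc ~= 0.0010227 Myr^-1, which is the
    # conversion factor 0.0010227121650537077 quoted above.)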
# Bovy 2017. Scale factors to match MW2014.
# Mike's scale factors
#A0 = 0.97*15.3 # km/s/kpc
#B0 = 1.12*(-11.9) # km/s/kpc
# Marusa's factors TODO: possibly do this at a higher level so this multiplication is not repeated every single time
#~ A0 = A0*sA # km/s/kpc
#~ B0 = B0*sB # km/s/kpc
A = A0*sA # km/s/kpc
B = B0*sB # km/s/kpc
# Fine tuning rho. TODO: do this at a higher level so it is not repeated every single time
rho_scale_factor = sR #1.36
rho = rho_scale_factor * 0.0889 # M0/pc3
Grho = rho * 0.004498502151575285 # Myr-2; rho should be given in M0/pc3
kappa = np.sqrt(-4.0 * B * (A - B)) # Myr-1
nu = np.sqrt(4.0 * np.pi * Grho + (A + B) * (A - B)) # Myr-1
t=times
kt=kappa*t
nt=nu*t
# Propagate positions
xi = xi0 + xidot0/kappa*np.sin(kt) + (etadot0 - 2.0*A*xi0) *\
(1.0 - np.cos(kt)) / (2.0*B)
eta = eta0 - xidot0 * (1.0 - np.cos(kt)) / (2.0*B) + etadot0 *\
(A*kt - (A-B)*np.sin(kt)) /\
(kappa*B) - xi0 * 2.0*A*(A-B)*(kt-np.sin(kt)) / (kappa*B)
zeta = zeta0*np.cos(nt) + zetadot0/nu*np.sin(nt)
# Propagate velocities
xidot = xidot0*np.cos(kt) + (etadot0 - 2.0*A*xi0) *\
kappa*np.sin(kt) / (2.0*B)
etadot = -xidot0*kappa/(2.0*B)*np.sin(kt) +\
etadot0/B*(A-(A-B)*np.cos(kt)) -\
2.0*A*xi0*(A-B)*(1.0-np.cos(kt))/B
zetadot = -zeta0*nu*np.sin(nt) + zetadot0*np.cos(nt)
# Assemble the array with results
new_position = np.array([xi, eta, zeta, xidot, etadot, zetadot])
new_position = new_position.T
return new_position
def trace_epicyclic_orbit(xyzuvw_start, times=None, sA=0.89, sB=1.15,
sR=1.21, ro=8., vo=220., single_age=True):
"""
Given a star's XYZUVW relative to the LSR (at any time), project its
orbit forward (or backward) to each of the times listed in *times*
using epicyclic approximation. This only works close to the LSR.
Positive times --> traceforward
Negative times --> traceback
Parameters
----------
xyzuvw : [pc, pc, pc, pc/Myr, pc/Myr, pc/Myr]
times : (float) or ([ntimes] float array)
Myr - time of 0.0 must be present in the array. Times need not be #TODO: time 0.0 really? [TC: this was true for galpy]
spread linearly.
single_age: (bool) {True}
Set this flag if only providing a single age to trace to
This is there for the plotting purposes.
sA, sB, sR: fine tuning factors for epicyclic approx. This works for
Sco-Cen, but not sure about other regions. Also, these are
determined for LSR with the wrong signs... TODO!!
Returns
-------
xyzuvw_tf : [ntimes, 6] array
[pc, pc, pc, km/s, km/s, km/s] - the traced orbit with positions
and velocities
"""
    # unnecessary if statement
if single_age:
# replace 0 with some tiny number
try:
if times == 0.:
times = 1e-15
# times = np.array([0., times])
except ValueError as err:
if not err.args:
err.args = ('',)
err.args = err.args + ('WARNING: comparing array to float? '
'Did you leave single_age as True?',)
raise
else:
        raise UserWarning('Multi age orbit integration no longer supported')
times = np.array(times)
#~ times = np.array([0, age])
# Make sure numbers are floats, and reshape into 2d
xyzuvw_start = np.atleast_2d(xyzuvw_start.astype(np.float))
# Units: Velocities are in km/s, convert into pc/Myr
xyzuvw_start[:,3:] = xyzuvw_start[:,3:] * 1.0227121650537077 # pc/Myr
#~ xyzuvw_start[3:] = xyzuvw_start[3:] * 1.0227121650537077 # pc/Myr
# Transform to curvilinear
curvilin = convert_cart2curvilin(xyzuvw_start, ro=ro, vo=vo)
# Trace orbit with epicyclic approx.
new_position = epicyclic_approx(curvilin, times=times, sA=sA, sB=sB,
sR=sR)
#~ print('new_position')
#~ print(new_position)
# Transform back to cartesian
xyzuvw_new = convert_curvilin2cart(new_position, ro=ro, vo=vo)
# Units: Transform velocities from pc/Myr back to km/s
xyzuvw_new[:,3:] /= 1.0227121650537077
#~ xyzuvw_new[3:] /= 1.0227121650537077
#~ return xyzuvw_new
# Remove empty dimensions
return np.squeeze(xyzuvw_new)
def calc_jacobian_epicyclic(loc, dim=6, h=1e-3, age=None):
"""
    Calculate the Jacobian of the coordinate transformation
    `trans_func` about `loc`.
    `trans_func` should take a vector of dimension `dim` to a new vector
    of same dimension. This function then estimates the first-order
    partial derivatives at point `loc` using second-order central
    differences. Extra arguments for `trans_func` can be provided as a
    tuple to `args`.
Parameters
----------
trans_func : function
Transformation function taking us from the initial coordinate
frame to the final coordinate frame
loc : [dim] float array
The position (in the initial coordinte frame) around which we
are calculating the jacobian
dim : int {6}
The dimensionality of the coordinate frames
h : float {1e-3}
The size of the increment, smaller values maybe run into
numerical issues
args : tuple {None}
Extra arguments required by `trans_func`
Returns
-------
jac : [dim,dim] float array
A jacobian matrix
Notes
-----
OPTIMISATION TARGET
The application of `trans_func` is the bottleneck of Chronostar
(at least when `trans_func` is traceorbit.trace_cartesian_orbit).
Since this is a loop, there is scope for parallelisation.
"""
#~ print('args', args)
jac = np.zeros((dim, dim))
# Even with epicyclic, this constitutes 90% of chronostar work
# so, we pass all 12 required positions to the trans_func as
# one array, to exploit numpy's faster array operations
start_pos = []
for i in range(dim):
offset = np.zeros(dim)
offset[i] = h
loc_pl = loc + offset
loc_mi = loc - offset
start_pos.append(loc_pl)
start_pos.append(loc_mi)
start_pos = np.array(start_pos)
final_pos = trace_epicyclic_orbit(start_pos, times=age)
print('temporal_propagation_marusa')
for i in range(dim):
jac[:,i] = (final_pos[2*i] - final_pos[2*i + 1]) / (2*h)
print(final_pos[2*i], final_pos[2*i + 1])
# for i in range(dim):
# jac[:,i] = calc_jacobian_column(trans_func, i, loc, dim, h, args)
return jac
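# (Note added for clarity, not in the original code) The loop above is the
# central-difference estimate jac[:, i] ~= (f(loc + h*e_i) - f(loc - h*e_i)) / (2h),
# with f = trace_epicyclic_orbit evaluated at the given age; the covariance
# transform below then applies the first-order propagation jac . cov . jac^T.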
def trace_epicyclic_covmatrix(cov, loc, dim=6, h=1e-3, age=None):
"""
This is 'transform.transform_covmatrix' from Tim's Chronostar.
Calculates covariance matrix of current day distribution.
Transforms a covariance matrix from one coordinate frame to another.
Calculated as a first-order Taylor approximation of the coordinate
transformation that takes the initial mean to the current day mean.
This is the most expensive aspect of Chronostar, so we first make
sure the covariance matrix hasn't already been projected.
Parameters
----------
cov : [dim,dim] float array
Covariance matrix in the initial frame
trans_func : function
Transformation function taking us from the initial
coordinate frame to the final coordinate frame. Output must be
mutable, i.e. single value, or an array
loc : [dim] float array
The position (in the initial coordinate frame)
around which we are calculating the jacobian
(i.e. the mean, in the example of a Gaussian distribution)
dim : integer {6}
The dimensionality of the coordinate frame
h : float {1e-3}
The size of the increment, smaller values maybe run into
numerical issues
args : tuple
extra args to be passed to trans_func. E.g. for traceOrbitXYZUVW
args = (age,) [for traceforward] or args = (-age,)
[for traceback]
Returns
-------
conv_cov : [dim,dim] float array
The transformed covariance matrix
"""
jac = calc_jacobian_epicyclic(loc, age=age, dim=dim, h=h)
print('jac P')
print(jac)
cov_transformed = np.dot(jac, np.dot(cov, jac.T))
return cov_transformed
if __name__=='__main__':
import pickle
with open('../fastfit/data_for_testing/input_data_to_expectation.pkl', 'rb') as f:
input_data = pickle.load(f)
_, comps, _, _, _ = input_data
comp = comps[0]
# Component at time=0 and age
mean0 = comp.get_mean()
cov0 = comp.get_covmatrix()
age = comp.get_age()
#~ print('mean0')
#~ print(mean0)
# Tim's transformation
mean_now, cov_now = comp.get_currentday_projection()
# Testing transformation
#~ mean_now_test = trace_epicyclic_orbit(mean0, times=age)
#~ print('mean_now')
#~ print(mean_now)
#~ diff_mean = mean_now_test - mean_now
#~ mask = np.abs(diff_mean)>1e-8
#~ print('diff mean dimensions that differ', np.sum(mask))
#~ cov_now_test = trace_epicyclic_covmatrix(cov0, loc=mean0, age=age)
#~ diff_cov = cov_now_test - cov_now
#~ mask = np.abs(diff_mean)>1e-6
#~ print('diff cov now', np.sum(mask))
#~ from chronostar import transform
#~ from chronostar import traceorbit
#~ cov_now1 = transform.transform_covmatrix(cov0,
#~ traceorbit.trace_epicyclic_orbit, mean0, args=(age,))
#~ mean_now1 = traceorbit.trace_epicyclic_orbit(mean0, times=age)
# TEST convert_cart2curvilin
curvilin = convert_cart2curvilin(mean0)
#~ print('curvilin')
#~ print(curvilin)
new_position = epicyclic_approx(curvilin, times=age)
#~ print('new_position')
#~ print(new_position)
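    # Illustrative round-trip check (not part of the original tests), assuming the
    # default ro/vo values are used in both directions: converting to curvilinear
    # coordinates and straight back should recover the Cartesian input to
    # numerical precision.
    roundtrip = convert_curvilin2cart(convert_cart2curvilin(mean0))
    assert np.allclose(roundtrip, mean0), 'cart -> curvilin -> cart mismatch'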
| {
"content_hash": "4fbcbc0715bcb6fcc714f3e9594b2938",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 149,
"avg_line_length": 31.505567928730514,
"alnum_prop": 0.6117630425561996,
"repo_name": "mikeireland/chronostar",
"id": "0de4d9bd0a1e91696ec7d3f7449c5a0a4be23450",
"size": "14146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chronostar/run_em_files_python/temporal_propagation_marusa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "308199"
},
{
"name": "C++",
"bytes": "2106"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "1116075"
},
{
"name": "SWIG",
"bytes": "4608"
},
{
"name": "Shell",
"bytes": "1163"
}
],
"symlink_target": ""
} |
import abc
import os
import uuid
from oslo_config import cfg
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
self.plugin = job_utils.get_plugin(self.cluster)
def _get_client(self):
return o.OozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster))
def _get_oozie_job_params(self, hdfs_user, path_to_workflow, oozie_params,
use_hbase_lib):
app_path = "oozie.wf.application.path"
oozie_libpath_key = "oozie.libpath"
oozie_libpath = ""
rm_path = self.get_resource_manager_uri(self.cluster)
nn_path = self.get_name_node_uri(self.cluster)
hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH)
if use_hbase_lib:
if oozie_libpath_key in oozie_params:
oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key,
""), hbase_common_lib_path)
else:
oozie_libpath = hbase_common_lib_path
job_parameters = {
"jobTracker": rm_path,
"nameNode": nn_path,
"user.name": hdfs_user,
oozie_libpath_key: oozie_libpath,
app_path: "%s%s" % (nn_path, path_to_workflow),
"oozie.use.system.libpath": "true"}
# Don't let the application path be overwritten, that can't
# possibly make any sense
if app_path in oozie_params:
del oozie_params[app_path]
if oozie_libpath_key in oozie_params:
del oozie_params[oozie_libpath_key]
job_parameters.update(oozie_params)
return job_parameters
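    # Illustrative example (hypothetical values, not taken from the Sahara docs):
    # with hdfs_user='hadoop', path_to_workflow='/user/hadoop/wf', an empty
    # oozie_params dict and use_hbase_lib=False, the returned dict would look
    # roughly like
    #   {"jobTracker": <rm uri>, "nameNode": <nn uri>, "user.name": "hadoop",
    #    "oozie.libpath": "", "oozie.wf.application.path": <nn uri> + "/user/hadoop/wf",
    #    "oozie.use.system.libpath": "true"}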
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
return "%s/workflow.xml" % job_dir
def cancel_job(self, job_execution):
if job_execution.oozie_job_id is not None:
client = self._get_client()
client.kill_job(job_execution)
return client.get_job_status(job_execution)
def get_job_status(self, job_execution):
if job_execution.oozie_job_id is not None:
return self._get_client().get_job_status(job_execution)
def run_job(self, job_execution):
ctx = context.ctx()
# This will be a dictionary of tuples, (native_url, runtime_url)
# keyed by data_source id
data_source_urls = {}
job = conductor.job_get(ctx, job_execution.job_id)
input_source, output_source = job_utils.get_data_sources(
job_execution, job, data_source_urls, self.cluster)
# Updated_job_configs will be a copy of job_execution.job_configs with
# any name or uuid references to data_sources resolved to paths
# assuming substitution is enabled.
# If substitution is not enabled then updated_job_configs will
# just be a reference to job_execution.job_configs to avoid a copy.
# Additional_sources will be a list of any data_sources found.
additional_sources, updated_job_configs = (
job_utils.resolve_data_source_references(job_execution.job_configs,
job_execution.id,
data_source_urls,
self.cluster)
)
job_execution = conductor.job_execution_update(
ctx, job_execution,
{"data_source_urls": job_utils.to_url_dict(data_source_urls)})
# Now that we've recorded the native urls, we can switch to the
# runtime urls
data_source_urls = job_utils.to_url_dict(data_source_urls,
runtime=True)
proxy_configs = updated_job_configs.get('proxy_configs')
configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', False)
# Extract all the 'oozie.' configs so that they can be set in the
# job properties file. These are config values for Oozie itself,
# not the job code
oozie_params = {}
for k in list(configs):
if k.startswith('oozie.'):
oozie_params[k] = configs[k]
for data_source in [input_source, output_source] + additional_sources:
if data_source and data_source.type == 'hdfs':
h.configure_cluster_for_hdfs(
self.cluster, data_source_urls[data_source.id])
break
external_hdfs_urls = self._resolve_external_hdfs_urls(
job_execution.job_configs)
for url in external_hdfs_urls:
h.configure_cluster_for_hdfs(self.cluster, url)
hdfs_user = self.get_hdfs_user()
# TODO(tmckay): this should probably be "get_namenode"
# but that call does not exist in the oozie engine api now.
oozie_server = self.get_oozie_server(self.cluster)
wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
proxy_configs)
wf_xml = workflow_factory.get_workflow_xml(
job, self.cluster, updated_job_configs,
input_source, output_source,
hdfs_user, data_source_urls)
path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
wf_xml, hdfs_user)
job_params = self._get_oozie_job_params(hdfs_user,
path_to_workflow,
oozie_params,
use_hbase_lib)
client = self._get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
client.run_job(job_execution, oozie_job_id)
try:
status = client.get_job_status(job_execution,
oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
@abc.abstractmethod
def get_hdfs_user(self):
pass
@abc.abstractmethod
def create_hdfs_dir(self, remote, dir_name):
pass
@abc.abstractmethod
def get_oozie_server_uri(self, cluster):
pass
@abc.abstractmethod
def get_oozie_server(self, cluster):
pass
@abc.abstractmethod
def get_name_node_uri(self, cluster):
pass
@abc.abstractmethod
def get_resource_manager_uri(self, cluster):
pass
def validate_job_execution(self, cluster, job, data):
# Shell job type requires no specific fields
if job.type == edp.JOB_TYPE_SHELL:
return
# All other types except Java require input and output
# objects and Java require main class
if job.type == edp.JOB_TYPE_JAVA:
j.check_main_class_present(data, job)
else:
j.check_data_sources(data, job)
job_type, subtype = edp.split_job_type(job.type)
if job_type == edp.JOB_TYPE_MAPREDUCE and (
subtype == edp.JOB_SUBTYPE_STREAMING):
j.check_streaming_present(data, job)
@staticmethod
def get_possible_job_config(job_type):
return workflow_factory.get_possible_job_config(job_type)
@staticmethod
def get_supported_job_types():
return [edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_JAVA,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING,
edp.JOB_TYPE_PIG,
edp.JOB_TYPE_SHELL]
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
proxy_configs=None):
mains = job.mains or []
libs = job.libs or []
builtin_libs = edp.get_builtin_binaries(job, configs)
uploaded_paths = []
hdfs_user = self.get_hdfs_user()
job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
lib_dir = os.path.join(job_dir, job_dir_suffix)
with remote.get_remote(where) as r:
for main in mains:
raw_data = dispatch.get_raw_binary(
main, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
job_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, main.name,
job_dir, hdfs_user)
uploaded_paths.append(job_dir + '/' + main.name)
if len(libs) and job_dir_suffix:
# HDFS 2.2.0 fails to put file if the lib dir does not exist
self.create_hdfs_dir(r, lib_dir)
for lib in libs:
raw_data = dispatch.get_raw_binary(
                    lib, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
lib_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, lib.name,
lib_dir, hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib.name)
for lib in builtin_libs:
h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib['name'])
return uploaded_paths
def _create_hdfs_workflow_dir(self, where, job):
constructed_dir = '/user/%s/' % self.get_hdfs_user()
constructed_dir = self._add_postfix(constructed_dir)
constructed_dir += '%s/%s' % (job.name, six.text_type(uuid.uuid4()))
with remote.get_remote(where) as r:
self.create_hdfs_dir(r, constructed_dir)
return constructed_dir
def _add_postfix(self, constructed_dir):
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
constructed_dir = _append_slash_if_needed(constructed_dir)
if CONF.job_workflow_postfix:
constructed_dir = ''.join([str(constructed_dir),
str(CONF.job_workflow_postfix)])
return _append_slash_if_needed(constructed_dir)
def _resolve_external_hdfs_urls(self, job_configs):
external_hdfs_urls = []
for k, v in six.iteritems(job_configs.get('configs', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for k, v in six.iteritems(job_configs.get('params', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for v in job_configs.get('args', []):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
return external_hdfs_urls
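# Illustrative only: a minimal concrete engine showing the integration
# surface above -- each abstract method resolves a cluster-specific endpoint
# or user. The attribute lookups, helper call and port numbers below are
# assumptions for this sketch, not part of Sahara's API.
class _ExampleOozieJobEngine(OozieJobEngine):
    def get_hdfs_user(self):
        return "hadoop"

    def create_hdfs_dir(self, remote, dir_name):
        # Assumes the hdfs_helper module (imported above as ``h``) exposes a
        # Hadoop-2 directory-creation helper; adjust per plugin.
        h.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())

    def get_oozie_server(self, cluster):
        # Hypothetical attribute: the cluster instance running the Oozie server.
        return cluster.oozie_server

    def get_oozie_server_uri(self, cluster):
        return "http://%s:11000/oozie" % (
            self.get_oozie_server(cluster).management_ip)

    def get_name_node_uri(self, cluster):
        return "hdfs://%s:9000" % cluster.namenode_fqdn

    def get_resource_manager_uri(self, cluster):
        return "%s:8032" % cluster.resourcemanager_fqdn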
| {
"content_hash": "f200533dfd54ec0abe0d8687c495d139",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 79,
"avg_line_length": 40.334426229508196,
"alnum_prop": 0.5645423508372622,
"repo_name": "zhangjunli177/sahara",
"id": "fa8f7d487e721238b5314881a79ff1337d8ee466",
"size": "12892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/service/edp/oozie/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "29432"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3131969"
},
{
"name": "Shell",
"bytes": "60900"
}
],
"symlink_target": ""
} |
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import BitSendTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolCoinbaseTest(BitSendTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
                '-whitelist=noban@127.0.0.1', # immediate tx relay
],
[]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
        # Mine four blocks. After this, nodes[0]'s blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [self.nodes[0].getblockhash(n) for n in range(101, 105)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spend_101_raw = create_raw_transaction(self.nodes[0], coinbase_txids[1], node1_address, amount=49.99)
spend_102_raw = create_raw_transaction(self.nodes[0], coinbase_txids[2], node0_address, amount=49.99)
spend_103_raw = create_raw_transaction(self.nodes[0], coinbase_txids[3], node0_address, amount=49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction(
inputs=[{
"txid": coinbase_txids[0],
"vout": 0,
}],
outputs={node0_address: 49.99},
locktime=self.nodes[0].getblockcount() + 2,
)
timelock_tx = self.nodes[0].signrawtransactionwithwallet(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26, 'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_raw_transaction(self.nodes[0], spend_102_id, node1_address, amount=49.98)
spend_103_1_raw = create_raw_transaction(self.nodes[0], spend_103_id, node1_address, amount=49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Sync blocks, so that peer 1 gets the block before timelock_tx
# Otherwise, peer 1 would put the timelock_tx in recentRejects
self.sync_all()
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
self.sync_all()
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
self.sync_all()
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| {
"content_hash": "43a36b5fb031ca466b32f831c3e6025c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 109,
"avg_line_length": 45.149532710280376,
"alnum_prop": 0.6472779962740633,
"repo_name": "LIMXTEC/BitSend",
"id": "81afebd1adb32d0c6a9574f68ba55bb98822a444",
"size": "5045",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.21-master",
"path": "test/functional/mempool_reorg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "4450630"
},
{
"name": "C++",
"bytes": "8023567"
},
{
"name": "CMake",
"bytes": "28560"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "214695"
},
{
"name": "Makefile",
"bytes": "117044"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2204293"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "154029"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8444")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
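# A variant of the same unlock flow, written as a sketch: getpass keeps the
# passphrase out of the terminal echo and the unlock timeout is passed
# explicitly. The endpoint URL is the one used above; everything else here
# is illustrative, not part of the original tool.
from getpass import getpass

def unlock_wallet(url="http://127.0.0.1:8444", timeout_seconds=60):
    rpc = ServiceProxy(url)
    # walletpassphrase unlocks the wallet for timeout_seconds seconds
    rpc.walletpassphrase(getpass("Enter wallet passphrase: "), timeout_seconds)
    # usage: unlock_wallet()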
| {
"content_hash": "1d6ea472c8f9a5e496ae93ea36ec7c54",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.5,
"alnum_prop": 0.759493670886076,
"repo_name": "scificrypto/Darsek",
"id": "da9355c52974af006fdaef84eb4fb0f410ee3fbf",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "49947"
},
{
"name": "C",
"bytes": "33638"
},
{
"name": "C++",
"bytes": "2615493"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "14602"
},
{
"name": "NSIS",
"bytes": "6045"
},
{
"name": "Objective-C",
"bytes": "747"
},
{
"name": "Objective-C++",
"bytes": "2451"
},
{
"name": "OpenEdge ABL",
"bytes": "11855"
},
{
"name": "Python",
"bytes": "41583"
},
{
"name": "QMake",
"bytes": "15155"
},
{
"name": "Roff",
"bytes": "13286"
},
{
"name": "Shell",
"bytes": "8751"
}
],
"symlink_target": ""
} |
"""GeoJSON source for VTEC event"""
import json
import datetime
from pymemcache.client import Client
import psycopg2.extras
from paste.request import parse_formvars
from pyiem.util import get_dbconn, html_escape
ISO = "%Y-%m-%dT%H:%M:%SZ"
def run_lsrs(wfo, year, phenomena, significance, etn, sbw):
"""Do great things"""
pgconn = get_dbconn("postgis")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
sbwtable = f"sbw_{year}"
warningtable = f"warnings_{year}"
if sbw == 1:
sql = f"""
SELECT distinct l.*, valid at time zone 'UTC' as utc_valid,
ST_asGeoJson(l.geom) as geojson
from lsrs l, {sbwtable} w WHERE
l.geom && w.geom and ST_contains(w.geom, l.geom)
and l.wfo = %s and
l.valid >= w.issue and l.valid <= w.expire and
w.wfo = %s and w.eventid = %s and
w.significance = %s and w.phenomena = %s
ORDER by l.valid ASC
"""
args = (wfo, wfo, etn, significance, phenomena)
else:
sql = f"""
WITH countybased as (
SELECT min(issue) as issued, max(expire) as expired
from {warningtable} w JOIN ugcs u on (u.gid = w.gid)
WHERE w.wfo = %s and w.eventid = %s and
w.significance = %s
and w.phenomena = %s)
SELECT distinct l.*, valid at time zone 'UTC' as utc_valid,
ST_asGeoJson(l.geom) as geojson
from lsrs l, countybased c WHERE
l.valid >= c.issued and l.valid < c.expired and
l.wfo = %s ORDER by l.valid ASC
"""
args = (wfo, etn, significance, phenomena, wfo)
cursor.execute(sql, args)
res = {
"type": "FeatureCollection",
"features": [],
"generation_time": datetime.datetime.utcnow().strftime(
"%Y-%m-%dT%H:%M:%SZ"
),
"count": cursor.rowcount,
}
for row in cursor:
res["features"].append(
dict(
type="Feature",
properties=dict(
utc_valid=row["utc_valid"].strftime(ISO),
event=row["typetext"],
type=row["type"],
magnitude=row["magnitude"],
city=row["city"],
county=row["county"],
remark=row["remark"],
),
geometry=json.loads(row["geojson"]),
)
)
return json.dumps(res)
def run_sbw(wfo, year, phenomena, significance, etn):
"""Do great things"""
pgconn = get_dbconn("postgis")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = f"sbw_{year}"
cursor.execute(
f"""
SELECT
ST_asGeoJson(geom) as geojson,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_init_expire
from {table}
WHERE wfo = %s and eventid = %s and phenomena = %s and significance = %s
and status = 'NEW'
""",
(wfo, etn, phenomena, significance),
)
res = {
"type": "FeatureCollection",
"features": [],
"generation_time": datetime.datetime.utcnow().strftime(
"%Y-%m-%dT%H:%M:%SZ"
),
"count": cursor.rowcount,
}
for row in cursor:
res["features"].append(
dict(
type="Feature",
properties=dict(
phenomena=phenomena, significance=significance, eventid=etn
),
geometry=json.loads(row["geojson"]),
)
)
return json.dumps(res)
def run(wfo, year, phenomena, significance, etn):
"""Do great things"""
pgconn = get_dbconn("postgis")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = f"warnings_{year}"
cursor.execute(
f"""
SELECT
w.ugc,
ST_asGeoJson(u.geom) as geojson,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_init_expire
from {table} w JOIN ugcs u on (w.gid = u.gid)
WHERE w.wfo = %s and eventid = %s and
phenomena = %s and significance = %s
""",
(wfo, etn, phenomena, significance),
)
res = {
"type": "FeatureCollection",
"features": [],
"generation_time": datetime.datetime.utcnow().strftime(
"%Y-%m-%dT%H:%M:%SZ"
),
"count": cursor.rowcount,
}
for row in cursor:
res["features"].append(
dict(
type="Feature",
id=row["ugc"],
properties=dict(
phenomena=phenomena, significance=significance, eventid=etn
),
geometry=json.loads(row["geojson"]),
)
)
return json.dumps(res)
def application(environ, start_response):
"""Main()"""
headers = [("Content-type", "application/vnd.geo+json")]
form = parse_formvars(environ)
wfo = form.get("wfo", "MPX")
if len(wfo) == 4:
wfo = wfo[1:]
year = int(form.get("year", 2015))
phenomena = form.get("phenomena", "SV")[:2]
significance = form.get("significance", "W")[:1]
etn = int(form.get("etn", 1))
sbw = int(form.get("sbw", 0))
lsrs = int(form.get("lsrs", 0))
cb = form.get("callback", None)
mckey = (
f"/geojson/vtec_event/{wfo}/{year}/{phenomena}/{significance}/"
f"{etn}/{sbw}/{lsrs}"
)
mc = Client("iem-memcached:11211")
res = mc.get(mckey)
if not res:
if lsrs == 1:
res = run_lsrs(wfo, year, phenomena, significance, etn, sbw)
else:
if sbw == 1:
res = run_sbw(wfo, year, phenomena, significance, etn)
else:
res = run(wfo, year, phenomena, significance, etn)
mc.set(mckey, res, 3600)
else:
res = res.decode("utf-8")
mc.close()
if cb is not None:
res = f"{html_escape(cb)}({res})"
start_response("200 OK", headers)
return [res.encode("ascii")]
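# A minimal client-side sketch of how this endpoint is queried. The host name
# is an assumption (the IEM website); the query parameters map one-to-one to
# the form fields parsed in application() above.
def fetch_vtec_event(wfo="DMX", year=2017, phenomena="TO",
                     significance="W", etn=12, sbw=1):
    from urllib.request import urlopen
    url = (
        "https://mesonet.agron.iastate.edu/geojson/vtec_event.py"  # assumed host
        f"?wfo={wfo}&year={year}&phenomena={phenomena}"
        f"&significance={significance}&etn={etn}&sbw={sbw}"
    )
    with urlopen(url) as resp:
        return json.loads(resp.read().decode("utf-8"))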
| {
"content_hash": "3bfea499b6e5b036710d04353263fbfa",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 30.63819095477387,
"alnum_prop": 0.5282926029194686,
"repo_name": "akrherz/iem",
"id": "86cacf2fca3c909d724edb29904b294db8ccde55",
"size": "6097",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "htdocs/geojson/vtec_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
} |
from robotide.version import VERSION
import docutils.core
docutils.core.publish_file(
source_path="../doc/releasenotes/ride-"+".".join([str(x) for x in VERSION.split('.')[:2]]).replace('v','')+".rst",
destination_path="../src/robotide/application/release_notes.html",
writer_name="html")
# Replace &#123; by { and &#125; by }
print("Now paste content of ../src/robotide/application/release_notes.html to"
" RELEASE_NOTES in ../src/robotide/application/releasenotes.py")
source_path = "../CHANGELOG.adoc"
directory = "../src/robotide/application"
destination_path = directory + "/CHANGELOG.html"
from subprocess import run
run(["a2x3", "-f", "xhtml", "-D", directory, source_path])
# Remove ToC
import re
# <div class="toc"> <p>All notable
with open(destination_path, "r") as sources:
lines = sources.readlines()
with open(destination_path, "w") as sources:
for line in lines:
sources.write(re.sub(r'<div class=\"toc\">.*<p>All notable', '<p>All notable', line))
print(f"Check quality of {destination_path}")
| {
"content_hash": "9edfb77dadc26588cd4843ed9948bfa9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 118,
"avg_line_length": 35,
"alnum_prop": 0.6819047619047619,
"repo_name": "robotframework/RIDE",
"id": "091b943fe1bd537f602ae45f8f16cbfb07b97740",
"size": "1050",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/mkhtml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
} |