text (stringlengths 4 to 1.02M) | meta (dict)
---|---|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0005_auto_20151129_0421'),
]
operations = [
migrations.AlterField(
model_name='band',
name='assigned_members',
field=models.ManyToManyField(related_name='bands', verbose_name=b'Assigned members', to='members.BandMember', blank=True),
),
]
| {
"content_hash": "ec2fef5caa508c35b4b8ecc22c822201",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 134,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.6297872340425532,
"repo_name": "KonichiwaKen/band-dashboard",
"id": "03c4eab55423fc2b1811d110f321f6b55eca8c52",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/migrations/0006_auto_20160126_0639.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "140"
},
{
"name": "HTML",
"bytes": "29776"
},
{
"name": "JavaScript",
"bytes": "52906"
},
{
"name": "Python",
"bytes": "72053"
}
],
"symlink_target": ""
} |
"""Wrappers for protocol buffer enum types."""
import enum
class Likelihood(enum.IntEnum):
"""
A bucketized representation of likelihood, which is intended to give clients
highly stable results across model upgrades.
Attributes:
UNKNOWN (int): Unknown likelihood.
VERY_UNLIKELY (int): It is very unlikely that the image belongs to the specified vertical.
UNLIKELY (int): It is unlikely that the image belongs to the specified vertical.
POSSIBLE (int): It is possible that the image belongs to the specified vertical.
LIKELY (int): It is likely that the image belongs to the specified vertical.
VERY_LIKELY (int): It is very likely that the image belongs to the specified vertical.
"""
UNKNOWN = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
class TextAnnotation(object):
class DetectedBreak(object):
class BreakType(enum.IntEnum):
"""
Enum to denote the type of break found. New line, space etc.
Attributes:
UNKNOWN (int): Unknown break label type.
SPACE (int): Regular space.
SURE_SPACE (int): Sure space (very wide).
EOL_SURE_SPACE (int): Line-wrapping break.
HYPHEN (int): End-line hyphen that is not present in text; does not co-occur with
``SPACE``, ``LEADER_SPACE``, or ``LINE_BREAK``.
LINE_BREAK (int): Line break that ends a paragraph.
"""
UNKNOWN = 0
SPACE = 1
SURE_SPACE = 2
EOL_SURE_SPACE = 3
HYPHEN = 4
LINE_BREAK = 5
class Block(object):
class BlockType(enum.IntEnum):
"""
Type of a block (text, image etc) as identified by OCR.
Attributes:
UNKNOWN (int): Unknown block type.
TEXT (int): Regular text block.
TABLE (int): Table block.
PICTURE (int): Image block.
RULER (int): Horizontal/vertical line box.
BARCODE (int): Barcode block.
"""
UNKNOWN = 0
TEXT = 1
TABLE = 2
PICTURE = 3
RULER = 4
BARCODE = 5
class Feature(object):
class Type(enum.IntEnum):
"""
Type of Google Cloud Vision API feature to be extracted.
Attributes:
TYPE_UNSPECIFIED (int): Unspecified feature type.
FACE_DETECTION (int): Run face detection.
LANDMARK_DETECTION (int): Run landmark detection.
LOGO_DETECTION (int): Run logo detection.
LABEL_DETECTION (int): Run label detection.
TEXT_DETECTION (int): Run text detection / optical character recognition (OCR). Text detection
is optimized for areas of text within a larger image; if the image is a
document, use ``DOCUMENT_TEXT_DETECTION`` instead.
DOCUMENT_TEXT_DETECTION (int): Run dense text document OCR. Takes precedence when both
``DOCUMENT_TEXT_DETECTION`` and ``TEXT_DETECTION`` are present.
SAFE_SEARCH_DETECTION (int): Run Safe Search to detect potentially unsafe
or undesirable content.
IMAGE_PROPERTIES (int): Compute a set of image properties, such as the
image's dominant colors.
CROP_HINTS (int): Run crop hints.
WEB_DETECTION (int): Run web detection.
"""
TYPE_UNSPECIFIED = 0
FACE_DETECTION = 1
LANDMARK_DETECTION = 2
LOGO_DETECTION = 3
LABEL_DETECTION = 4
TEXT_DETECTION = 5
DOCUMENT_TEXT_DETECTION = 11
SAFE_SEARCH_DETECTION = 6
IMAGE_PROPERTIES = 7
CROP_HINTS = 9
WEB_DETECTION = 10
class FaceAnnotation(object):
class Landmark(object):
class Type(enum.IntEnum):
"""
Face landmark (feature) type. Left and right are defined from the
vantage of the viewer of the image without considering mirror
projections typical of photos. So, ``LEFT_EYE``, typically, is the
person's right eye.
Attributes:
UNKNOWN_LANDMARK (int): Unknown face landmark detected. Should not be filled.
LEFT_EYE (int): Left eye.
RIGHT_EYE (int): Right eye.
LEFT_OF_LEFT_EYEBROW (int): Left of left eyebrow.
RIGHT_OF_LEFT_EYEBROW (int): Right of left eyebrow.
LEFT_OF_RIGHT_EYEBROW (int): Left of right eyebrow.
RIGHT_OF_RIGHT_EYEBROW (int): Right of right eyebrow.
MIDPOINT_BETWEEN_EYES (int): Midpoint between eyes.
NOSE_TIP (int): Nose tip.
UPPER_LIP (int): Upper lip.
LOWER_LIP (int): Lower lip.
MOUTH_LEFT (int): Mouth left.
MOUTH_RIGHT (int): Mouth right.
MOUTH_CENTER (int): Mouth center.
NOSE_BOTTOM_RIGHT (int): Nose, bottom right.
NOSE_BOTTOM_LEFT (int): Nose, bottom left.
NOSE_BOTTOM_CENTER (int): Nose, bottom center.
LEFT_EYE_TOP_BOUNDARY (int): Left eye, top boundary.
LEFT_EYE_RIGHT_CORNER (int): Left eye, right corner.
LEFT_EYE_BOTTOM_BOUNDARY (int): Left eye, bottom boundary.
LEFT_EYE_LEFT_CORNER (int): Left eye, left corner.
RIGHT_EYE_TOP_BOUNDARY (int): Right eye, top boundary.
RIGHT_EYE_RIGHT_CORNER (int): Right eye, right corner.
RIGHT_EYE_BOTTOM_BOUNDARY (int): Right eye, bottom boundary.
RIGHT_EYE_LEFT_CORNER (int): Right eye, left corner.
LEFT_EYEBROW_UPPER_MIDPOINT (int): Left eyebrow, upper midpoint.
RIGHT_EYEBROW_UPPER_MIDPOINT (int): Right eyebrow, upper midpoint.
LEFT_EAR_TRAGION (int): Left ear tragion.
RIGHT_EAR_TRAGION (int): Right ear tragion.
LEFT_EYE_PUPIL (int): Left eye pupil.
RIGHT_EYE_PUPIL (int): Right eye pupil.
FOREHEAD_GLABELLA (int): Forehead glabella.
CHIN_GNATHION (int): Chin gnathion.
CHIN_LEFT_GONION (int): Chin left gonion.
CHIN_RIGHT_GONION (int): Chin right gonion.
"""
UNKNOWN_LANDMARK = 0
LEFT_EYE = 1
RIGHT_EYE = 2
LEFT_OF_LEFT_EYEBROW = 3
RIGHT_OF_LEFT_EYEBROW = 4
LEFT_OF_RIGHT_EYEBROW = 5
RIGHT_OF_RIGHT_EYEBROW = 6
MIDPOINT_BETWEEN_EYES = 7
NOSE_TIP = 8
UPPER_LIP = 9
LOWER_LIP = 10
MOUTH_LEFT = 11
MOUTH_RIGHT = 12
MOUTH_CENTER = 13
NOSE_BOTTOM_RIGHT = 14
NOSE_BOTTOM_LEFT = 15
NOSE_BOTTOM_CENTER = 16
LEFT_EYE_TOP_BOUNDARY = 17
LEFT_EYE_RIGHT_CORNER = 18
LEFT_EYE_BOTTOM_BOUNDARY = 19
LEFT_EYE_LEFT_CORNER = 20
RIGHT_EYE_TOP_BOUNDARY = 21
RIGHT_EYE_RIGHT_CORNER = 22
RIGHT_EYE_BOTTOM_BOUNDARY = 23
RIGHT_EYE_LEFT_CORNER = 24
LEFT_EYEBROW_UPPER_MIDPOINT = 25
RIGHT_EYEBROW_UPPER_MIDPOINT = 26
LEFT_EAR_TRAGION = 27
RIGHT_EAR_TRAGION = 28
LEFT_EYE_PUPIL = 29
RIGHT_EYE_PUPIL = 30
FOREHEAD_GLABELLA = 31
CHIN_GNATHION = 32
CHIN_LEFT_GONION = 33
CHIN_RIGHT_GONION = 34
class OperationMetadata(object):
class State(enum.IntEnum):
"""
Batch operation states.
Attributes:
STATE_UNSPECIFIED (int): Invalid.
CREATED (int): Request is received.
RUNNING (int): Request is actively being processed.
DONE (int): The batch processing is done.
CANCELLED (int): The batch processing was cancelled.
"""
STATE_UNSPECIFIED = 0
CREATED = 1
RUNNING = 2
DONE = 3
CANCELLED = 4
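# Usage sketch (illustrative, not part of the generated module): the wrappers
# above are plain enum.IntEnum subclasses, so members compare and convert like
# ints, e.g.
#
#     Likelihood.VERY_LIKELY > Likelihood.POSSIBLE        # True
#     int(Feature.Type.DOCUMENT_TEXT_DETECTION)           # 11
#     TextAnnotation.DetectedBreak.BreakType(5).name      # 'LINE_BREAK'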
| {
"content_hash": "a9ba3f7aa276af27408a3a12af5a0197",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 104,
"avg_line_length": 37.62735849056604,
"alnum_prop": 0.5715181145794158,
"repo_name": "dhermes/google-cloud-python",
"id": "509e00fec15d1833e45f3ec9b01d8dd7ff3901fc",
"size": "8578",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vision/google/cloud/vision_v1p2beta1/gapic/enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "936"
},
{
"name": "Makefile",
"bytes": "1779"
},
{
"name": "Python",
"bytes": "13118304"
},
{
"name": "Shell",
"bytes": "8606"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask import request
from os import popen
app = Flask(__name__)
@app.route('/')
def main_form():
data = popen('fortune').read()
return '<h1><blockquote><tt>{}</tt></blockquote></h1>'.format(data.replace('--', '<p>--'))
if __name__ == '__main__':
app.run()
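# Usage sketch (assumption: Flask is installed and the `fortune` binary is on
# the PATH):
#
#     $ python fortune.py
#     $ curl http://127.0.0.1:5000/
#     <h1><blockquote><tt>...</tt></blockquote></h1>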
| {
"content_hash": "514c2dd5eb7537104858d6f75afd1b88",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 94,
"avg_line_length": 20.133333333333333,
"alnum_prop": 0.5894039735099338,
"repo_name": "talapus/Ophidian",
"id": "9ae8837513f3411f5cec566919584f1235dfdd0f",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Flask_fu/fortune.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "154649"
},
{
"name": "JavaScript",
"bytes": "3364"
},
{
"name": "Python",
"bytes": "314611"
},
{
"name": "Shell",
"bytes": "16809"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.test import TestCase, override_settings
from rest_framework.settings import APISettings, api_settings
class TestSettings(TestCase):
def test_import_error_message_maintained(self):
"""
Make sure import errors are captured and raised sensibly.
"""
settings = APISettings({
'DEFAULT_RENDERER_CLASSES': [
'tests.invalid_module.InvalidClassName'
]
})
with self.assertRaises(ImportError):
settings.DEFAULT_RENDERER_CLASSES
def test_warning_raised_on_removed_setting(self):
"""
        Make sure the user is alerted with an error when a removed setting
        is set.
"""
with self.assertRaises(RuntimeError):
APISettings({
'MAX_PAGINATE_BY': 100
})
def test_compatibility_with_override_settings(self):
"""
Ref #5658 & #2466: Documented usage of api_settings
is bound at import time:
from rest_framework.settings import api_settings
setting_changed signal hook must ensure bound instance
is refreshed.
"""
assert api_settings.PAGE_SIZE is None, "Checking a known default should be None"
with override_settings(REST_FRAMEWORK={'PAGE_SIZE': 10}):
assert api_settings.PAGE_SIZE == 10, "Setting should have been updated"
assert api_settings.PAGE_SIZE is None, "Setting should have been restored"
class TestSettingTypes(TestCase):
def test_settings_consistently_coerced_to_list(self):
settings = APISettings({
'DEFAULT_THROTTLE_CLASSES': ('rest_framework.throttling.BaseThrottle',)
})
self.assertTrue(isinstance(settings.DEFAULT_THROTTLE_CLASSES, list))
settings = APISettings({
'DEFAULT_THROTTLE_CLASSES': ()
})
self.assertTrue(isinstance(settings.DEFAULT_THROTTLE_CLASSES, list))
| {
"content_hash": "55d644fdd9e6e8470c5869c3d2f208e6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 88,
"avg_line_length": 33.559322033898304,
"alnum_prop": 0.6368686868686869,
"repo_name": "kgeorgy/django-rest-framework",
"id": "51e9751b25da0c70d1928c24231bb46ee5fd6007",
"size": "1980",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39386"
},
{
"name": "HTML",
"bytes": "84905"
},
{
"name": "JavaScript",
"bytes": "18201"
},
{
"name": "Python",
"bytes": "1218922"
}
],
"symlink_target": ""
} |
from rest_framework.serializers import Serializer, ModelSerializer, ValidationError
from .models import Activity, Location, Sleep
class BaseSerializer(Serializer):
"""
Base Serializer
"""
def validate(self, data):
if data['time_start'] >= data['time_end']:
raise ValidationError('Start time should be prior to end time')
return data
class Meta:
fields = (
'id',
'time_start',
'time_end',
)
class ActivitySerializer(BaseSerializer, ModelSerializer):
"""
Activity Serializer
"""
class Meta:
model = Activity
fields = BaseSerializer.Meta.fields + (
'value',
)
class LocationSerializer(ModelSerializer):
"""
Location Serializer
"""
class Meta:
model = Location
fields = BaseSerializer.Meta.fields + (
'latitude',
'longitude',
)
class SleepSerializer(ModelSerializer):
"""
Sleep Serializer
"""
class Meta:
model = Sleep
fields = BaseSerializer.Meta.fields
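# Usage sketch (illustrative; the field values below are assumptions about the
# Activity model): the shared BaseSerializer.validate() rejects any record
# whose start is not strictly before its end.
#
#     serializer = ActivitySerializer(data={
#         "time_start": "2017-01-01T10:00:00Z",
#         "time_end": "2017-01-01T09:00:00Z",
#         "value": 1,
#     })
#     serializer.is_valid()  # False: 'Start time should be prior to end time'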
| {
"content_hash": "e52e2624f3265f4b7319905dba0d7595",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 83,
"avg_line_length": 21.725490196078432,
"alnum_prop": 0.5812274368231047,
"repo_name": "PEKTOP/metrics-api",
"id": "387a423cf986032004e8be6b4427640b61106ed1",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17505"
}
],
"symlink_target": ""
} |
import spotipy.core.baseplaylist
import os
import mimetypes
class LocalPlayList(spotipy.core.baseplaylist.BasePlayList):
def __init__(self, wrapper, d, name = None):
spotipy.core.baseplaylist.BasePlayList.__init__(self, wrapper)
self.__local_dir = d
self.__name = name
def get_name(self):
        if self.__name is not None:
            return self.__name
        else:
            return os.path.basename(self.__local_dir)
def get_tracks(self):
ret = []
playlist = self.__local_dir
        if os.path.exists(playlist):
            entries = sorted(os.listdir(playlist))
            count = 0
            for f in entries:
                filename = os.path.join(playlist, f)
                mime, encoding = mimetypes.guess_type(filename)
                # Keep only audio files, skipping m3u playlist files.
                if mime is not None and mime.startswith("audio") and mime != "audio/x-mpegurl":
                    count += 1
                    af = self.get_wrapper().create_audio_file(filename)
                    af.set_index(count)
                    # yield af
                    ret.append(af)
return ret
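# Usage sketch (illustrative; `wrapper` stands in for whatever spotipy wrapper
# object provides create_audio_file()):
#
#     playlist = LocalPlayList(wrapper, "/home/user/Music/roadtrip")
#     playlist.get_name()             # "roadtrip"
#     tracks = playlist.get_tracks()  # indexed audio files, m3u playlists skipped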
| {
"content_hash": "a9f76b33e95640d59a96787d704b4dab",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 91,
"avg_line_length": 33.484848484848484,
"alnum_prop": 0.5321266968325792,
"repo_name": "ZenHarbinger/spotipy",
"id": "1a0cb9d8dad50e243ae7633ad3676b132c9cf270",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotipy/backends/local/localplaylist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2140"
},
{
"name": "JavaScript",
"bytes": "3735"
},
{
"name": "Python",
"bytes": "142376"
}
],
"symlink_target": ""
} |
"""
This is an example DAG for an Amazon EMR on EKS Spark job.
"""
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.providers.amazon.aws.operators.emr import EmrContainerOperator
# [START howto_operator_emr_eks_env_variables]
VIRTUAL_CLUSTER_ID = os.getenv("VIRTUAL_CLUSTER_ID", "test-cluster")
JOB_ROLE_ARN = os.getenv("JOB_ROLE_ARN", "arn:aws:iam::012345678912:role/emr_eks_default_role")
# [END howto_operator_emr_eks_env_variables]
# [START howto_operator_emr_eks_config]
JOB_DRIVER_ARG = {
"sparkSubmitJobDriver": {
"entryPoint": "local:///usr/lib/spark/examples/src/main/python/pi.py",
"sparkSubmitParameters": "--conf spark.executors.instances=2 --conf spark.executors.memory=2G --conf spark.executor.cores=2 --conf spark.driver.cores=1", # noqa: E501
}
}
CONFIGURATION_OVERRIDES_ARG = {
"applicationConfiguration": [
{
"classification": "spark-defaults",
"properties": {
"spark.hadoop.hive.metastore.client.factory.class": "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory", # noqa: E501
},
}
],
"monitoringConfiguration": {
"cloudWatchMonitoringConfiguration": {
"logGroupName": "/aws/emr-eks-spark",
"logStreamNamePrefix": "airflow",
}
},
}
# [END howto_operator_emr_eks_config]
with DAG(
dag_id='emr_eks_pi_job',
dagrun_timeout=timedelta(hours=2),
start_date=datetime(2021, 1, 1),
schedule_interval="@once",
catchup=False,
tags=["emr_containers", "example"],
) as dag:
# An example of how to get the cluster id and arn from an Airflow connection
# VIRTUAL_CLUSTER_ID = '{{ conn.emr_eks.extra_dejson["virtual_cluster_id"] }}'
# JOB_ROLE_ARN = '{{ conn.emr_eks.extra_dejson["job_role_arn"] }}'
# [START howto_operator_emr_eks_jobrun]
job_starter = EmrContainerOperator(
task_id="start_job",
virtual_cluster_id=VIRTUAL_CLUSTER_ID,
execution_role_arn=JOB_ROLE_ARN,
release_label="emr-6.3.0-latest",
job_driver=JOB_DRIVER_ARG,
configuration_overrides=CONFIGURATION_OVERRIDES_ARG,
name="pi.py",
)
# [END howto_operator_emr_eks_jobrun]
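# Usage sketch (assumption: the Amazon provider is installed and AWS credentials
# are configured): the DAG can be exercised once from the CLI with
#
#     airflow dags test emr_eks_pi_job 2021-01-01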
| {
"content_hash": "14710a066826c7250273a897175c5950",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 175,
"avg_line_length": 34.90769230769231,
"alnum_prop": 0.6584398413397973,
"repo_name": "bolkedebruin/airflow",
"id": "11c1c5b5f6ffae1b165d4a498ec0093fbe73ea01",
"size": "3054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/example_dags/example_emr_eks_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('domain_api', '0006_auto_20170329_0320'),
]
operations = [
migrations.RenameField(
model_name='registereddomain',
old_name='anniversary',
new_name='expiration',
),
migrations.RenameField(
model_name='topleveldomainprovider',
old_name='anniversary_notification_period_days',
new_name='expiration_notification_period_days',
),
]
| {
"content_hash": "25108ce472cb1b9101c7ed34fe7c6a28",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 60,
"avg_line_length": 25.652173913043477,
"alnum_prop": 0.6016949152542372,
"repo_name": "heytrav/drs-project",
"id": "2a0c3cdb183f8de13f255cde68125de0c180df23",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domain_api/migrations/0007_auto_20170330_0850.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "271912"
}
],
"symlink_target": ""
} |
"""Show the status of a mel repository.
The idea is to show you active concerns and what you can do about them, with
executable examples.
This is meant to be similar in usage to 'git status', or perhaps 'ls'. Answers
the question 'What's happening here, and what shall I do next?'.
"""
# There are a few things to vary on in the future:
#
# - Which paths are included
# - What kinds of things are included (alerts, errors, info, fun facts)
# - Level of detail, e.g. individual moles, rotomaps, parts, etc.
#
# Potentially we can also try to fit to a certain amount of screen real-estate.
import collections
import datetime
import os
import sys
import textwrap
from enum import IntEnum
import colorama
import mel.lib.fs
import mel.micro.fs
import mel.rotomap.moles
class ImportanceLevel(IntEnum):
# Alert = -1
# Error = 0
Info = 1
class Notification:
def __init__(self, path):
self.path = path
def format(self, detail_level):
return str(self.path)
def hint(self):
return None
class AlertNotification(Notification):
pass
class ErrorNotification(Notification):
pass
class InfoNotification(Notification):
pass
class RotomapNewMoleAlert(AlertNotification):
def __init__(self, path):
super().__init__(path)
self.uuid_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.uuid_list)
output += "\n"
return output
class RotomapLesionChangedAlert(AlertNotification):
def __init__(self, path):
super().__init__(path)
self.uuid_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.uuid_list)
output += "\n"
return output
class MicroLesionChangedAlert(AlertNotification):
def __init__(self, path, id_):
super().__init__(path)
self.id_ = id_
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += " " * 2 + f"{self.id_}"
output += "\n"
return output
class InvalidDateError(ErrorNotification):
pass
class RotomapDuplicateUuidError(ErrorNotification):
def __init__(self, rotomap_path):
super().__init__(rotomap_path)
self.frame_to_uuid_list = collections.defaultdict(list)
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
if detail_level == 1:
output += "\n\n"
output += "\n".join(
" " * 2 + f"{f}" for f in sorted(self.frame_to_uuid_list)
)
output += "\n"
else:
f_to_ul = self.frame_to_uuid_list
for frame, uuid_list in sorted(f_to_ul.items()):
output += "\n\n"
output += f" {frame}:\n"
output += "\n"
output += "\n".join(" " * 4 + f"{u}" for u in uuid_list)
return output
class RotomapNotLoadable(ErrorNotification):
def __init__(self, path, error=None):
super().__init__(path)
self.error = error
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0 and self.error is not None:
output += ":\n\n"
output += f" {self.error}"
if isinstance(self.error, Exception):
error = self.error
while True:
if error.__cause__:
error = error.__cause__
output += f"\n caused by '{error}'."
elif error.__context__ and not error.__suppress_context__:
error = error.__context__
output += f"\n during '{error}'."
else:
break
output += "\n"
return output
class NoBaseDirInfo(InfoNotification):
pass
class UnexpectedFileInfo(InfoNotification):
pass
class UnexpectedDirInfo(InfoNotification):
pass
class MicroMissingIdInfo(InfoNotification):
def __init__(self, path):
super().__init__(path)
def format(self, detail_level):
return f"{self.path}"
def hint(self):
return (
"Copy the id from the appropriate rotomap, or "
"use e.g. `uuidgen` to generate a new id."
)
class RotomapMissingMoleInfo(InfoNotification):
def __init__(self, path):
super().__init__(path)
self.uuid_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.uuid_list)
output += "\n"
return output
class RotomapMissingLesionUnchangedStatus(InfoNotification):
def __init__(self, path):
super().__init__(path)
self.uuid_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.uuid_list)
output += "\n"
return output
class RotomapUnconfirmedMoleInfo(InfoNotification):
def __init__(self, rotomap_path):
super().__init__(rotomap_path)
self.frame_to_uuid_list = collections.defaultdict(list)
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
if detail_level == 1:
output += "\n\n"
output += "\n".join(
" " * 2 + f"{f}" for f in sorted(self.frame_to_uuid_list)
)
output += "\n"
else:
f_to_ul = self.frame_to_uuid_list
for frame, uuid_list in sorted(f_to_ul.items()):
output += "\n\n"
output += f" {frame}:\n"
output += "\n"
output += "\n".join(" " * 4 + f"{u}" for u in uuid_list)
return output
class RotomapMissingMoleFileInfo(InfoNotification):
def __init__(self, path):
super().__init__(path)
self.frame_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.frame_list)
output += "\n"
return output
class RotomapMissingMaskInfo(InfoNotification):
def __init__(self, path):
super().__init__(path)
self.frame_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.frame_list)
output += "\n"
return output
class RotomapMissingSpaceInfo(InfoNotification):
def __init__(self, path):
super().__init__(path)
self.frame_list = []
def format(self, detail_level):
output = f"{self.path}"
if detail_level > 0:
output += "\n\n"
output += "\n".join(" " * 2 + f"{u}" for u in self.frame_list)
output += "\n"
return output
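# Illustrative sketch (not part of the original command): every notification
# renders its path, and most add per-item detail when the detail level is
# raised, e.g.
#
#     alert = RotomapNewMoleAlert("parts/LeftLeg/Upper/2017_04_01")
#     alert.uuid_list.append("0123456789abcdef0123456789abcdef")
#     print(alert.format(detail_level=1))
#     # parts/LeftLeg/Upper/2017_04_01
#     #
#     #   0123456789abcdef0123456789abcdef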
def setup_parser(parser):
parser.add_argument("PATH", nargs="?")
parser.add_argument("--detail", "-d", action="count", default=0)
parser.add_argument("--trivia", "-t", action="count", default=0)
def process_args(args):
colorama.init()
try:
melroot = mel.lib.fs.find_melroot()
except mel.lib.fs.NoMelrootError:
print("Not in a mel repo, could not find melroot", file=sys.stderr)
return 1
if args.detail > 2:
print(f"melroot: {melroot}")
notice_list = []
rotomaps_path = melroot / mel.lib.fs.ROTOMAPS_PATH
if rotomaps_path.exists():
check_rotomaps(rotomaps_path, notice_list, args.trivia)
else:
notice_list.append(NoBaseDirInfo(mel.lib.fs.ROTOMAPS_PATH))
micro_path = melroot / mel.lib.fs.MICRO_PATH
if micro_path.exists():
check_micro(micro_path, notice_list)
else:
notice_list.append(NoBaseDirInfo(mel.lib.fs.MICRO_PATH))
alerts_to_notices = collections.defaultdict(list)
errors_to_notices = collections.defaultdict(list)
info_to_notices = collections.defaultdict(list)
abspath = os.path.abspath(args.PATH) if args.PATH is not None else None
for notice in notice_list:
if abspath is not None:
if not str(notice.path).startswith(abspath):
continue
klass = notice.__class__
if issubclass(klass, AlertNotification):
alerts_to_notices[klass].append(notice)
elif issubclass(klass, ErrorNotification):
errors_to_notices[klass].append(notice)
elif issubclass(klass, InfoNotification):
info_to_notices[klass].append(notice)
else:
raise RuntimeError(f"Unexpected notice type: {klass}")
any_notices = bool(alerts_to_notices or errors_to_notices)
print_klass_to_notices(alerts_to_notices, args.detail, colorama.Fore.RED)
print_klass_to_notices(
errors_to_notices, args.detail, colorama.Fore.MAGENTA
)
if args.trivia > 0:
print_klass_to_notices(
info_to_notices, args.detail, colorama.Fore.BLUE
)
if not any_notices and info_to_notices:
any_notices = True
if any_notices:
print()
return any_notices
def print_klass_to_notices(klass_to_notices, detail_level, fore):
for klass, notice_list in klass_to_notices.items():
print()
print(fore, klass.__name__, colorama.Fore.RESET)
for notice in notice_list:
print(textwrap.indent(notice.format(detail_level), " "))
hint = notice.hint()
if hint is not None:
hint = f"({hint})"
print()
print(textwrap.indent(textwrap.fill(hint), " "))
def check_rotomaps(path, notices, importance_level):
# incoming_path = path / 'incoming'
parts_path = path / "parts"
if parts_path.exists():
# So far I've organised parts like so:
#
# LeftLeg/Upper
# LeftLeg/Lower
# LeftLeg/Foot
# Trunk/Back
# Trunk/Waist
# etc.
#
# So each part is a two-level thing. Each of the parts has leaf
        # directories that are the actual rotomaps. The rotomaps are named
# after the day they are captured, in ISO 8601 format. Like so:
#
# LeftLeg/Upper/2017_01_20
# LeftLeg/Upper/2017_02_14
# LeftLeg/Upper/2017_04_01
# etc.
#
# Might as well assume the same for all, for now. Later we can allow
# arbitrary nesting.
#
for major_part in parts_path.iterdir():
if major_part.is_dir():
for minor_part in major_part.iterdir():
if minor_part.is_dir():
check_rotomap_minor_part(
minor_part, notices, importance_level
)
else:
notices.append(UnexpectedFileInfo(minor_part))
else:
notices.append(UnexpectedFileInfo(major_part))
else:
notices.append(NoBaseDirInfo(parts_path))
def check_rotomap_minor_part(path, notices, importance_level):
rotomap_list = make_rotomap_list(path, notices)
check_rotomap_list(notices, rotomap_list)
for rotomap in rotomap_list:
check_rotomap(notices, rotomap, importance_level)
if rotomap_list:
check_newest_rotomap(notices, rotomap_list[-1])
def make_rotomap_list(path, notices):
rotomap_list = []
for rotomap_path in path.iterdir():
if rotomap_path.is_dir():
try:
datetime.datetime.strptime(rotomap_path.name[:10], "%Y_%m_%d")
except ValueError:
notices.append(InvalidDateError(rotomap_path))
else:
rotomap_list.append(
mel.rotomap.moles.RotomapDirectory(rotomap_path)
)
else:
notices.append(UnexpectedFileInfo(rotomap_path))
rotomap_list.sort(key=lambda x: x.path)
return rotomap_list
def uuids_from_dir(rotomap_dir):
uuid_set = set()
for _, moles in rotomap_dir.yield_mole_lists():
for m in moles:
if m[mel.rotomap.moles.KEY_IS_CONFIRMED]:
uuid_set.add(m["uuid"])
return uuid_set
def check_rotomap_list(notices, rotomap_list):
if len(rotomap_list) < 2:
return
old_ones = rotomap_list[:-1]
newest = rotomap_list[-1]
uuid_to_oldmaps = collections.defaultdict(set)
for dir_ in old_ones:
for _, mole_list in dir_.yield_mole_lists():
for mole in mole_list:
if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
uuid_to_oldmaps[mole["uuid"]].add(dir_.path)
old_uuids = set(uuid_to_oldmaps.keys())
new_uuids = uuids_from_dir(newest)
ignore_new = mel.rotomap.moles.load_potential_set_file(
newest.path, mel.rotomap.moles.IGNORE_NEW_FILENAME
)
ignore_missing = mel.rotomap.moles.load_potential_set_file(
newest.path, mel.rotomap.moles.IGNORE_MISSING_FILENAME
)
diff = mel.rotomap.moles.MoleListDiff(
old_uuids, new_uuids, ignore_new, ignore_missing
)
if diff.new:
new_mole_alert = RotomapNewMoleAlert(newest.path)
new_mole_alert.uuid_list.extend(diff.new)
notices.append(new_mole_alert)
if diff.missing:
missing_notification = RotomapMissingMoleInfo(newest.path)
missing_notification.uuid_list.extend(diff.missing)
notices.append(missing_notification)
def check_rotomap(notices, rotomap, importance_level):
unconfirmed = RotomapUnconfirmedMoleInfo(rotomap.path)
duplicates = RotomapDuplicateUuidError(rotomap.path)
for imagepath, mole_list in rotomap.yield_mole_lists():
current_uuid_set = set()
for mole in mole_list:
uuid_ = mole["uuid"]
if uuid_ in current_uuid_set:
duplicates.frame_to_uuid_list[imagepath].append(uuid_)
current_uuid_set.add(uuid_)
if not mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
unconfirmed.frame_to_uuid_list[imagepath].append(uuid_)
if duplicates.frame_to_uuid_list:
notices.append(duplicates)
if unconfirmed.frame_to_uuid_list:
notices.append(unconfirmed)
if importance_level >= ImportanceLevel.Info:
missing_mole_file_info = RotomapMissingMoleFileInfo(rotomap.path)
missing_mask_info = RotomapMissingMaskInfo(rotomap.path)
missing_space_info = RotomapMissingSpaceInfo(rotomap.path)
try:
for frame in rotomap.yield_frames():
if not frame.has_mole_file():
missing_mole_file_info.frame_list.append(frame.path)
if not frame.has_mask():
missing_mask_info.frame_list.append(frame.path)
if "ellipse" not in frame.metadata:
missing_space_info.frame_list.append(frame.path)
except Exception as e:
notices.append(RotomapNotLoadable(rotomap.path, e))
for i in rotomap.path.iterdir():
if i.is_dir():
notices.append(UnexpectedDirInfo(i))
if missing_mole_file_info.frame_list:
notices.append(missing_mole_file_info)
if missing_mask_info.frame_list:
notices.append(missing_mask_info)
if missing_space_info.frame_list:
notices.append(missing_space_info)
def check_newest_rotomap(notices, rotomap):
missing_unchanged_status = RotomapMissingLesionUnchangedStatus(
rotomap.path
)
changed = RotomapLesionChangedAlert(rotomap.path)
ignore_new = mel.rotomap.moles.load_potential_set_file(
rotomap.path, mel.rotomap.moles.IGNORE_NEW_FILENAME
)
uuids = rotomap.calc_uuids()
uuid_to_unchanged_status = {
lesion["uuid"]: lesion[mel.rotomap.moles.KEY_IS_UNCHANGED]
for lesion in rotomap.lesions
}
ignore_changed = mel.rotomap.moles.load_potential_set_file(
rotomap.path, mel.rotomap.moles.IGNORE_CHANGED_FILENAME
)
for u in uuids:
if u not in uuid_to_unchanged_status:
if u in ignore_new:
continue
missing_unchanged_status.uuid_list.append(u)
continue
unchanged_status = uuid_to_unchanged_status[u]
if unchanged_status is None:
if u in ignore_new:
continue
missing_unchanged_status.uuid_list.append(u)
continue
elif not unchanged_status:
if u not in ignore_changed:
changed.uuid_list.append(u)
continue
if missing_unchanged_status.uuid_list:
notices.append(missing_unchanged_status)
if changed.uuid_list:
notices.append(changed)
def check_micro(path, notices):
parts_path = path / "data"
if parts_path.exists():
# So far I've organised parts like so:
#
# LeftArm/Hand
# LeftArm/Upper
# LeftLeg/Foot
# LeftLeg/LowerLeg
# LeftLeg/UpperLeg
# RightArm/Armpit
# RightArm/Forearm
# RightArm/Hand
# RightArm/Upper
# etc.
#
# So each part is a two-level thing. Each of the parts has leaf
# directories that are the actual moles or mole groups.
#
for major_part in parts_path.iterdir():
if major_part.is_dir():
for minor_part in major_part.iterdir():
if minor_part.is_dir():
for mole in mel.micro.fs.yield_moles(minor_part):
_validate_mole_dir(mole.path, notices)
changed_path = mel.micro.fs.Names.CHANGED
if (mole.path / changed_path).exists():
notices.append(
MicroLesionChangedAlert(mole.path, mole.id)
)
if mole.id is None:
notices.append(
MicroMissingIdInfo(
mole.path / mel.micro.fs.Names.ID
)
)
else:
notices.append(UnexpectedFileInfo(minor_part))
else:
notices.append(UnexpectedFileInfo(major_part))
else:
notices.append(NoBaseDirInfo(parts_path))
def _validate_mole_dir(path, notices):
for sub in path.iterdir():
if sub.name.lower() not in mel.micro.fs.MOLE_DIR_ENTRIES:
if sub.suffix.lower() in mel.micro.fs.IMAGE_SUFFIXES:
continue
if sub.name in mel.micro.fs.FILES_TO_IGNORE and sub.is_file():
continue
if sub.name in mel.micro.fs.DIRS_TO_IGNORE and sub.is_dir():
continue
if sub.is_dir():
notices.append(UnexpectedDirInfo(sub))
else:
notices.append(UnexpectedFileInfo(sub))
# -----------------------------------------------------------------------------
# Copyright (C) 2018-2019 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"content_hash": "c8fe33ec74370ef0c889351b96f4d06a",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 79,
"avg_line_length": 31.006060606060608,
"alnum_prop": 0.5625488663017982,
"repo_name": "aevri/mel",
"id": "379674da737f7d46f32489402b8c2e7a28242494",
"size": "20464",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mel/cmd/status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "355692"
},
{
"name": "Shell",
"bytes": "2041"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from util import as_row, as_col
def noise(d, variance=1.0):
m = np.zeros_like(d)
m[d == 0.0] = variance
return m
def constant(d, variance):
return variance * np.ones_like(d)
def grad_constant(d, variance):
return {'variance': np.ones_like(d)}
def squared_exponential(d, variance, length):
r = np.abs(d)
return variance * np.exp(-0.5 * (r / length) ** 2)
def grad_squared_exponential(d, variance, length):
r = np.abs(d)
g = dict()
g['variance'] = squared_exponential(d, 1.0, length)
g['length'] = squared_exponential(d, variance, length)
g['length'] *= (r ** 2) / (length ** 3)
return g
def exponential(d, variance, length):
r = np.abs(d)
return variance * np.exp(- r / length)
def grad_exponential(d, variance, length):
r = np.abs(d)
g = dict()
g['variance'] = exponential(d, 1.0, length)
g['length'] = exponential(d, variance, length)
g['length'] *= r / (length ** 2)
return g
def pairwise_differences(x1, x2):
"""Compute differences between all pairs of elements in two vectors.
# Arguments
x1 : A sequence of numbers.
x2 : A sequence of numbers.
# Returns
A matrix with `len(x1)` rows and `len(x2)` columns.
"""
x1 = np.asarray(x1)
x2 = np.asarray(x2)
return as_col(x1) - as_row(x2)
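# Demo sketch (not part of the original module; the sample inputs are made up):
# build a small squared-exponential covariance matrix from pairwise differences.
if __name__ == '__main__':
    x_train = [0.0, 1.0, 2.0]
    x_test = [0.5, 1.5]
    d = pairwise_differences(x_train, x_test)               # shape (3, 2)
    cov = squared_exponential(d, variance=1.0, length=0.5)
    grads = grad_squared_exponential(d, variance=1.0, length=0.5)
    print(cov.shape, sorted(grads))                         # (3, 2) ['length', 'variance']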
| {
"content_hash": "9260fff354159bea8d652c6e15e1c3e4",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 21.384615384615383,
"alnum_prop": 0.60431654676259,
"repo_name": "pschulam/mypy",
"id": "ccdcb549ed906bd2504dc08d4f764d99149c7ba2",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypy/kernels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7506"
}
],
"symlink_target": ""
} |
"""Module to help with parsing and generating configuration files."""
from collections import OrderedDict
# pylint: disable=no-name-in-module
from distutils.version import LooseVersion # pylint: disable=import-error
import logging
import os
import re
import shutil
from typing import ( # noqa: F401 pylint: disable=unused-import
Any,
Tuple,
Optional,
Dict,
List,
Union,
Callable,
Sequence,
Set,
)
from types import ModuleType
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant import auth
from homeassistant.auth import (
providers as auth_providers,
mfa_modules as auth_mfa_modules,
)
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_HIDDEN,
ATTR_ASSUMED_STATE,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_PACKAGES,
CONF_UNIT_SYSTEM,
CONF_TIME_ZONE,
CONF_ELEVATION,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TEMPERATURE_UNIT,
TEMP_CELSIUS,
__version__,
CONF_CUSTOMIZE,
CONF_CUSTOMIZE_DOMAIN,
CONF_CUSTOMIZE_GLOB,
CONF_WHITELIST_EXTERNAL_DIRS,
CONF_AUTH_PROVIDERS,
CONF_AUTH_MFA_MODULES,
CONF_TYPE,
CONF_ID,
)
from homeassistant.core import DOMAIN as CONF_CORE, SOURCE_YAML, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import Integration, IntegrationNotFound
from homeassistant.requirements import (
async_get_integration_with_requirements,
RequirementsNotFound,
)
from homeassistant.util.yaml import load_yaml, SECRET_YAML
from homeassistant.util.package import is_docker_env
import homeassistant.helpers.config_validation as cv
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers import config_per_platform, extract_domain_configs
_LOGGER = logging.getLogger(__name__)
DATA_PERSISTENT_ERRORS = "bootstrap_persistent_errors"
RE_YAML_ERROR = re.compile(r"homeassistant\.util\.yaml")
RE_ASCII = re.compile(r"\033\[[^m]*m")
HA_COMPONENT_URL = "[{}](https://home-assistant.io/components/{}/)"
YAML_CONFIG_FILE = "configuration.yaml"
VERSION_FILE = ".HA_VERSION"
CONFIG_DIR_NAME = ".homeassistant"
DATA_CUSTOMIZE = "hass_customize"
FILE_MIGRATION = (("ios.conf", ".ios.conf"),)
DEFAULT_CONFIG = """
# Configure a default setup of Home Assistant (frontend, api, etc)
default_config:
# Uncomment this if you are using SSL/TLS, running in Docker container, etc.
# http:
# base_url: example.duckdns.org:8123
# Text to speech
tts:
- platform: google_translate
group: !include groups.yaml
automation: !include automations.yaml
script: !include scripts.yaml
"""
DEFAULT_SECRETS = """
# Use this file to store secrets like usernames and passwords.
# Learn more at https://home-assistant.io/docs/configuration/secrets/
some_password: welcome
"""
TTS_PRE_92 = """
tts:
- platform: google
"""
TTS_92 = """
tts:
- platform: google_translate
service_name: google_say
"""
def _no_duplicate_auth_provider(
configs: Sequence[Dict[str, Any]]
) -> Sequence[Dict[str, Any]]:
"""No duplicate auth provider config allowed in a list.
Each type of auth provider can only have one config without optional id.
    A unique id is required if the same type of auth provider is used multiple
    times.
"""
config_keys = set() # type: Set[Tuple[str, Optional[str]]]
for config in configs:
key = (config[CONF_TYPE], config.get(CONF_ID))
if key in config_keys:
raise vol.Invalid(
"Duplicate auth provider {} found. Please add unique IDs if "
"you want to have the same auth provider twice".format(
config[CONF_TYPE]
)
)
config_keys.add(key)
return configs
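# Illustrative sketch (not from the original module): two providers of the same
# type without distinct ids are rejected by the validator above, while unique
# ids make the duplicate type acceptable.
#
#     _no_duplicate_auth_provider([
#         {CONF_TYPE: "homeassistant"},
#         {CONF_TYPE: "homeassistant"},
#     ])  # raises vol.Invalid
#
#     _no_duplicate_auth_provider([
#         {CONF_TYPE: "homeassistant", CONF_ID: "primary"},
#         {CONF_TYPE: "homeassistant", CONF_ID: "secondary"},
#     ])  # passes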
def _no_duplicate_auth_mfa_module(
configs: Sequence[Dict[str, Any]]
) -> Sequence[Dict[str, Any]]:
"""No duplicate auth mfa module item allowed in a list.
Each type of mfa module can only have one config without optional id.
    A globally unique id is required if the same type of mfa module is used
    multiple times.
    Note: this is different from auth providers.
"""
config_keys = set() # type: Set[str]
for config in configs:
key = config.get(CONF_ID, config[CONF_TYPE])
if key in config_keys:
raise vol.Invalid(
"Duplicate mfa module {} found. Please add unique IDs if "
"you want to have the same mfa module twice".format(config[CONF_TYPE])
)
config_keys.add(key)
return configs
PACKAGES_CONFIG_SCHEMA = cv.schema_with_slug_keys( # Package names are slugs
vol.Schema({cv.string: vol.Any(dict, list, None)}) # Component config
)
CUSTOMIZE_DICT_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_HIDDEN): cv.boolean,
vol.Optional(ATTR_ASSUMED_STATE): cv.boolean,
},
extra=vol.ALLOW_EXTRA,
)
CUSTOMIZE_CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(CONF_CUSTOMIZE, default={}): vol.Schema(
{cv.entity_id: CUSTOMIZE_DICT_SCHEMA}
),
vol.Optional(CONF_CUSTOMIZE_DOMAIN, default={}): vol.Schema(
{cv.string: CUSTOMIZE_DICT_SCHEMA}
),
vol.Optional(CONF_CUSTOMIZE_GLOB, default={}): vol.Schema(
{cv.string: CUSTOMIZE_DICT_SCHEMA}
),
}
)
CORE_CONFIG_SCHEMA = CUSTOMIZE_CONFIG_SCHEMA.extend(
{
CONF_NAME: vol.Coerce(str),
CONF_LATITUDE: cv.latitude,
CONF_LONGITUDE: cv.longitude,
CONF_ELEVATION: vol.Coerce(int),
vol.Optional(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
CONF_UNIT_SYSTEM: cv.unit_system,
CONF_TIME_ZONE: cv.time_zone,
vol.Optional(CONF_WHITELIST_EXTERNAL_DIRS):
# pylint: disable=no-value-for-parameter
vol.All(cv.ensure_list, [vol.IsDir()]),
vol.Optional(CONF_PACKAGES, default={}): PACKAGES_CONFIG_SCHEMA,
vol.Optional(CONF_AUTH_PROVIDERS): vol.All(
cv.ensure_list,
[
auth_providers.AUTH_PROVIDER_SCHEMA.extend(
{
CONF_TYPE: vol.NotIn(
["insecure_example"],
"The insecure_example auth provider"
" is for testing only.",
)
}
)
],
_no_duplicate_auth_provider,
),
vol.Optional(CONF_AUTH_MFA_MODULES): vol.All(
cv.ensure_list,
[
auth_mfa_modules.MULTI_FACTOR_AUTH_MODULE_SCHEMA.extend(
{
CONF_TYPE: vol.NotIn(
["insecure_example"],
"The insecure_example mfa module" " is for testing only.",
)
}
)
],
_no_duplicate_auth_mfa_module,
),
}
)
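# Illustrative sketch (not part of the original module): CORE_CONFIG_SCHEMA
# validates the [homeassistant] block and fills in the customize/package
# defaults, e.g.
#
#     validated = CORE_CONFIG_SCHEMA({"name": "Home", "unit_system": "metric"})
#     # validated["customize"] == {} and validated["packages"] == {}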
def get_default_config_dir() -> str:
"""Put together the default configuration directory based on the OS."""
data_dir = os.getenv("APPDATA") if os.name == "nt" else os.path.expanduser("~")
return os.path.join(data_dir, CONFIG_DIR_NAME) # type: ignore
async def async_ensure_config_exists(
hass: HomeAssistant, config_dir: str
) -> Optional[str]:
"""Ensure a configuration file exists in given configuration directory.
Creating a default one if needed.
Return path to the configuration file.
"""
config_path = find_config_file(config_dir)
if config_path is None:
print("Unable to find configuration. Creating default one in", config_dir)
config_path = await async_create_default_config(hass, config_dir)
return config_path
async def async_create_default_config(
hass: HomeAssistant, config_dir: str
) -> Optional[str]:
"""Create a default configuration file in given configuration directory.
Return path to new config file if success, None if failed.
This method needs to run in an executor.
"""
return await hass.async_add_executor_job(_write_default_config, config_dir)
def _write_default_config(config_dir: str) -> Optional[str]:
"""Write the default config."""
from homeassistant.components.config.group import CONFIG_PATH as GROUP_CONFIG_PATH
from homeassistant.components.config.automation import (
CONFIG_PATH as AUTOMATION_CONFIG_PATH,
)
from homeassistant.components.config.script import CONFIG_PATH as SCRIPT_CONFIG_PATH
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
secret_path = os.path.join(config_dir, SECRET_YAML)
version_path = os.path.join(config_dir, VERSION_FILE)
group_yaml_path = os.path.join(config_dir, GROUP_CONFIG_PATH)
automation_yaml_path = os.path.join(config_dir, AUTOMATION_CONFIG_PATH)
script_yaml_path = os.path.join(config_dir, SCRIPT_CONFIG_PATH)
# Writing files with YAML does not create the most human readable results
# So we're hard coding a YAML template.
try:
with open(config_path, "wt") as config_file:
config_file.write(DEFAULT_CONFIG)
with open(secret_path, "wt") as secret_file:
secret_file.write(DEFAULT_SECRETS)
with open(version_path, "wt") as version_file:
version_file.write(__version__)
with open(group_yaml_path, "wt"):
pass
with open(automation_yaml_path, "wt") as fil:
fil.write("[]")
with open(script_yaml_path, "wt"):
pass
return config_path
except IOError:
print("Unable to create default configuration file", config_path)
return None
async def async_hass_config_yaml(hass: HomeAssistant) -> Dict:
"""Load YAML from a Home Assistant configuration file.
    This function allows a component inside the asyncio loop to reload its
    configuration by itself. Includes package merge.
This method is a coroutine.
"""
def _load_hass_yaml_config() -> Dict:
path = find_config_file(hass.config.config_dir)
if path is None:
raise HomeAssistantError(
"Config file not found in: {}".format(hass.config.config_dir)
)
config = load_yaml_config_file(path)
return config
config = await hass.async_add_executor_job(_load_hass_yaml_config)
core_config = config.get(CONF_CORE, {})
await merge_packages_config(hass, config, core_config.get(CONF_PACKAGES, {}))
return config
def find_config_file(config_dir: Optional[str]) -> Optional[str]:
"""Look in given directory for supported configuration files."""
if config_dir is None:
return None
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
return config_path if os.path.isfile(config_path) else None
def load_yaml_config_file(config_path: str) -> Dict[Any, Any]:
"""Parse a YAML configuration file.
Raises FileNotFoundError or HomeAssistantError.
This method needs to run in an executor.
"""
conf_dict = load_yaml(config_path)
if not isinstance(conf_dict, dict):
msg = "The configuration file {} does not contain a dictionary".format(
os.path.basename(config_path)
)
_LOGGER.error(msg)
raise HomeAssistantError(msg)
# Convert values to dictionaries if they are None
for key, value in conf_dict.items():
conf_dict[key] = value or {}
return conf_dict
def process_ha_config_upgrade(hass: HomeAssistant) -> None:
"""Upgrade configuration if necessary.
This method needs to run in an executor.
"""
version_path = hass.config.path(VERSION_FILE)
try:
with open(version_path, "rt") as inp:
conf_version = inp.readline().strip()
except FileNotFoundError:
# Last version to not have this file
conf_version = "0.7.7"
if conf_version == __version__:
return
_LOGGER.info(
"Upgrading configuration directory from %s to %s", conf_version, __version__
)
version_obj = LooseVersion(conf_version)
if version_obj < LooseVersion("0.50"):
# 0.50 introduced persistent deps dir.
lib_path = hass.config.path("deps")
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
if version_obj < LooseVersion("0.92"):
# 0.92 moved google/tts.py to google_translate/tts.py
config_path = find_config_file(hass.config.config_dir)
assert config_path is not None
with open(config_path, "rt", encoding="utf-8") as config_file:
config_raw = config_file.read()
if TTS_PRE_92 in config_raw:
_LOGGER.info("Migrating google tts to google_translate tts")
config_raw = config_raw.replace(TTS_PRE_92, TTS_92)
try:
with open(config_path, "wt", encoding="utf-8") as config_file:
config_file.write(config_raw)
            except IOError:
                _LOGGER.exception("Migrating to google_translate tts failed")
if version_obj < LooseVersion("0.94") and is_docker_env():
# In 0.94 we no longer install packages inside the deps folder when
# running inside a Docker container.
lib_path = hass.config.path("deps")
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
with open(version_path, "wt") as outp:
outp.write(__version__)
_LOGGER.debug("Migrating old system configuration files to new locations")
for oldf, newf in FILE_MIGRATION:
if os.path.isfile(hass.config.path(oldf)):
_LOGGER.info("Migrating %s to %s", oldf, newf)
os.rename(hass.config.path(oldf), hass.config.path(newf))
@callback
def async_log_exception(
ex: vol.Invalid, domain: str, config: Dict, hass: HomeAssistant
) -> None:
"""Log an error for configuration validation.
This method must be run in the event loop.
"""
if hass is not None:
async_notify_setup_error(hass, domain, True)
_LOGGER.error(_format_config_error(ex, domain, config))
@callback
def _format_config_error(ex: vol.Invalid, domain: str, config: Dict) -> str:
"""Generate log exception for configuration validation.
This method must be run in the event loop.
"""
message = "Invalid config for [{}]: ".format(domain)
if "extra keys not allowed" in ex.error_message:
message += (
"[{option}] is an invalid option for [{domain}]. "
"Check: {domain}->{path}.".format(
option=ex.path[-1],
domain=domain,
path="->".join(str(m) for m in ex.path),
)
)
else:
message += "{}.".format(humanize_error(config, ex))
try:
domain_config = config.get(domain, config)
except AttributeError:
domain_config = config
message += " (See {}, line {}). ".format(
getattr(domain_config, "__config_file__", "?"),
getattr(domain_config, "__line__", "?"),
)
if domain != CONF_CORE:
message += (
"Please check the docs at "
"https://home-assistant.io/components/{}/".format(domain)
)
return message
async def async_process_ha_core_config(
hass: HomeAssistant,
config: Dict,
api_password: Optional[str] = None,
trusted_networks: Optional[Any] = None,
) -> None:
"""Process the [homeassistant] section from the configuration.
This method is a coroutine.
"""
config = CORE_CONFIG_SCHEMA(config)
# Only load auth during startup.
if not hasattr(hass, "auth"):
auth_conf = config.get(CONF_AUTH_PROVIDERS)
if auth_conf is None:
auth_conf = [{"type": "homeassistant"}]
if api_password:
auth_conf.append(
{"type": "legacy_api_password", "api_password": api_password}
)
if trusted_networks:
auth_conf.append(
{"type": "trusted_networks", "trusted_networks": trusted_networks}
)
mfa_conf = config.get(
CONF_AUTH_MFA_MODULES,
[{"type": "totp", "id": "totp", "name": "Authenticator app"}],
)
setattr(
hass, "auth", await auth.auth_manager_from_config(hass, auth_conf, mfa_conf)
)
await hass.config.async_load()
hac = hass.config
if any(
[
k in config
for k in [
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_ELEVATION,
CONF_TIME_ZONE,
CONF_UNIT_SYSTEM,
]
]
):
hac.config_source = SOURCE_YAML
for key, attr in (
(CONF_LATITUDE, "latitude"),
(CONF_LONGITUDE, "longitude"),
(CONF_NAME, "location_name"),
(CONF_ELEVATION, "elevation"),
):
if key in config:
setattr(hac, attr, config[key])
if CONF_TIME_ZONE in config:
hac.set_time_zone(config[CONF_TIME_ZONE])
# Init whitelist external dir
hac.whitelist_external_dirs = {hass.config.path("www")}
if CONF_WHITELIST_EXTERNAL_DIRS in config:
hac.whitelist_external_dirs.update(set(config[CONF_WHITELIST_EXTERNAL_DIRS]))
# Customize
cust_exact = dict(config[CONF_CUSTOMIZE])
cust_domain = dict(config[CONF_CUSTOMIZE_DOMAIN])
cust_glob = OrderedDict(config[CONF_CUSTOMIZE_GLOB])
for name, pkg in config[CONF_PACKAGES].items():
pkg_cust = pkg.get(CONF_CORE)
if pkg_cust is None:
continue
try:
pkg_cust = CUSTOMIZE_CONFIG_SCHEMA(pkg_cust)
except vol.Invalid:
_LOGGER.warning("Package %s contains invalid customize", name)
continue
cust_exact.update(pkg_cust[CONF_CUSTOMIZE])
cust_domain.update(pkg_cust[CONF_CUSTOMIZE_DOMAIN])
cust_glob.update(pkg_cust[CONF_CUSTOMIZE_GLOB])
hass.data[DATA_CUSTOMIZE] = EntityValues(cust_exact, cust_domain, cust_glob)
if CONF_UNIT_SYSTEM in config:
if config[CONF_UNIT_SYSTEM] == CONF_UNIT_SYSTEM_IMPERIAL:
hac.units = IMPERIAL_SYSTEM
else:
hac.units = METRIC_SYSTEM
elif CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == TEMP_CELSIUS:
hac.units = METRIC_SYSTEM
else:
hac.units = IMPERIAL_SYSTEM
_LOGGER.warning(
"Found deprecated temperature unit in core "
"configuration expected unit system. Replace '%s: %s' "
"with '%s: %s'",
CONF_TEMPERATURE_UNIT,
unit,
CONF_UNIT_SYSTEM,
hac.units.name,
)
def _log_pkg_error(package: str, component: str, config: Dict, message: str) -> None:
"""Log an error while merging packages."""
message = "Package {} setup failed. Integration {} {}".format(
package, component, message
)
pack_config = config[CONF_CORE][CONF_PACKAGES].get(package, config)
message += " (See {}:{}). ".format(
getattr(pack_config, "__config_file__", "?"),
getattr(pack_config, "__line__", "?"),
)
_LOGGER.error(message)
def _identify_config_schema(module: ModuleType) -> Tuple[Optional[str], Optional[Dict]]:
"""Extract the schema and identify list or dict based."""
try:
schema = module.CONFIG_SCHEMA.schema[module.DOMAIN] # type: ignore
except (AttributeError, KeyError):
return None, None
t_schema = str(schema)
if t_schema.startswith("{") or "schema_with_slug_keys" in t_schema:
return ("dict", schema)
if t_schema.startswith(("[", "All(<function ensure_list")):
return ("list", schema)
return "", schema
def _recursive_merge(conf: Dict[str, Any], package: Dict[str, Any]) -> Union[bool, str]:
"""Merge package into conf, recursively."""
error = False # type: Union[bool, str]
for key, pack_conf in package.items():
if isinstance(pack_conf, dict):
if not pack_conf:
continue
conf[key] = conf.get(key, OrderedDict())
error = _recursive_merge(conf=conf[key], package=pack_conf)
elif isinstance(pack_conf, list):
if not pack_conf:
continue
conf[key] = cv.ensure_list(conf.get(key))
conf[key].extend(cv.ensure_list(pack_conf))
else:
if conf.get(key) is not None:
return key
conf[key] = pack_conf
return error
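# Illustrative sketch (not from the original module): _recursive_merge folds a
# package's config into the main config in place, returning the offending key
# when a non-mergeable duplicate scalar is found.
#
#     conf = {"logger": {"default": "info"}}
#     _recursive_merge(conf, {"logger": {"logs": {"homeassistant.core": "debug"}}})
#     # -> False; conf["logger"] now holds both "default" and "logs"
#
#     _recursive_merge(conf, {"logger": {"default": "warning"}})
#     # -> "default" (duplicate scalar key)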
async def merge_packages_config(
hass: HomeAssistant,
config: Dict,
packages: Dict[str, Any],
_log_pkg_error: Callable = _log_pkg_error,
) -> Dict:
"""Merge packages into the top-level configuration. Mutate config."""
# pylint: disable=too-many-nested-blocks
PACKAGES_CONFIG_SCHEMA(packages)
for pack_name, pack_conf in packages.items():
for comp_name, comp_conf in pack_conf.items():
if comp_name == CONF_CORE:
continue
            # If the component name is given with a trailing description,
            # remove it when looking for the component.
domain = comp_name.split(" ")[0]
try:
integration = await async_get_integration_with_requirements(
hass, domain
)
component = integration.get_component()
except (IntegrationNotFound, RequirementsNotFound, ImportError) as ex:
_log_pkg_error(pack_name, comp_name, config, str(ex))
continue
if hasattr(component, "PLATFORM_SCHEMA"):
if not comp_conf:
                    continue  # Ensure we don't add falsy items to the list
config[comp_name] = cv.ensure_list(config.get(comp_name))
config[comp_name].extend(cv.ensure_list(comp_conf))
continue
if hasattr(component, "CONFIG_SCHEMA"):
merge_type, _ = _identify_config_schema(component)
if merge_type == "list":
if not comp_conf:
                        continue  # Ensure we don't add falsy items to the list
config[comp_name] = cv.ensure_list(config.get(comp_name))
config[comp_name].extend(cv.ensure_list(comp_conf))
continue
if comp_conf is None:
comp_conf = OrderedDict()
if not isinstance(comp_conf, dict):
_log_pkg_error(
pack_name, comp_name, config, "cannot be merged. Expected a dict."
)
continue
if comp_name not in config or config[comp_name] is None:
config[comp_name] = OrderedDict()
if not isinstance(config[comp_name], dict):
_log_pkg_error(
pack_name,
comp_name,
config,
"cannot be merged. Dict expected in main config.",
)
continue
error = _recursive_merge(conf=config[comp_name], package=comp_conf)
if error:
_log_pkg_error(
pack_name, comp_name, config, "has duplicate key '{}'".format(error)
)
return config
async def async_process_component_config(
hass: HomeAssistant, config: Dict, integration: Integration
) -> Optional[Dict]:
"""Check component configuration and return processed configuration.
Returns None on error.
This method must be run in the event loop.
"""
domain = integration.domain
try:
component = integration.get_component()
except ImportError as ex:
_LOGGER.error("Unable to import %s: %s", domain, ex)
return None
if hasattr(component, "CONFIG_SCHEMA"):
try:
return component.CONFIG_SCHEMA(config) # type: ignore
except vol.Invalid as ex:
async_log_exception(ex, domain, config, hass)
return None
component_platform_schema = getattr(
component, "PLATFORM_SCHEMA_BASE", getattr(component, "PLATFORM_SCHEMA", None)
)
if component_platform_schema is None:
return config
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
p_validated = component_platform_schema(p_config)
except vol.Invalid as ex:
async_log_exception(ex, domain, p_config, hass)
continue
        # Not all platform components follow the same pattern for platforms.
        # So if p_name is None we are not going to validate the platform
        # (the automation component is one of them).
if p_name is None:
platforms.append(p_validated)
continue
try:
p_integration = await async_get_integration_with_requirements(hass, p_name)
except (RequirementsNotFound, IntegrationNotFound) as ex:
_LOGGER.error("Platform error: %s - %s", domain, ex)
continue
try:
platform = p_integration.get_platform(domain)
except ImportError:
_LOGGER.exception("Platform error: %s", domain)
continue
# Validate platform specific schema
if hasattr(platform, "PLATFORM_SCHEMA"):
# pylint: disable=no-member
try:
p_validated = platform.PLATFORM_SCHEMA( # type: ignore
p_config
)
except vol.Invalid as ex:
async_log_exception(ex, "{}.{}".format(domain, p_name), p_config, hass)
continue
platforms.append(p_validated)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, domain)
config[domain] = platforms
return config
@callback
def config_without_domain(config: Dict, domain: str) -> Dict:
"""Return a config with all configuration for a domain removed."""
filter_keys = extract_domain_configs(config, domain)
return {key: value for key, value in config.items() if key not in filter_keys}
async def async_check_ha_config_file(hass: HomeAssistant) -> Optional[str]:
"""Check if Home Assistant configuration file is valid.
This method is a coroutine.
"""
import homeassistant.helpers.check_config as check_config
res = await check_config.async_check_ha_config_file(hass)
if not res.errors:
return None
return res.error_str
@callback
def async_notify_setup_error(
hass: HomeAssistant, component: str, display_link: bool = False
) -> None:
"""Print a persistent notification.
This method must be run in the event loop.
"""
from homeassistant.components import persistent_notification
errors = hass.data.get(DATA_PERSISTENT_ERRORS)
if errors is None:
errors = hass.data[DATA_PERSISTENT_ERRORS] = {}
errors[component] = errors.get(component) or display_link
message = "The following components and platforms could not be set up:\n\n"
for name, link in errors.items():
if link:
part = HA_COMPONENT_URL.format(name.replace("_", "-"), name)
else:
part = name
message += " - {}\n".format(part)
message += "\nPlease check your config."
persistent_notification.async_create(
hass, message, "Invalid config", "invalid_config"
)
| {
"content_hash": "f9db096ac69d2a5afe83fe81b945c797",
"timestamp": "",
"source": "github",
"line_count": 850,
"max_line_length": 88,
"avg_line_length": 32.654117647058825,
"alnum_prop": 0.609814094249892,
"repo_name": "fbradyirl/home-assistant",
"id": "4d3d4dd841fd726a9b8f5e17a068d25e5f32abe2",
"size": "27756",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
import logging
import os
import sys
from pprint import pprint
from datetime import datetime
from arps.util import LOREM, encrypt_password
import click
from arps.models import *
from flask.cli import with_appcontext
log = logging.getLogger()
@click.group()
def main():
pass
@main.command()
@with_appcontext
def shell():
"""Runs a shell in the app context.
Runs an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
"""
import IPython
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nIPython: %s\nApp: %s%s\nInstance: %s\n' % (
sys.version,
sys.platform,
IPython.__version__,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
ctx.update(app.make_shell_context())
IPython.embed(banner1=banner, user_ns=ctx)
def object_to_dict(obj, found=None):
if found is None:
found = set()
mapper = obj.__class__.__mapper__
columns = [column.key for column in mapper.columns]
get_key_value = lambda c: (c, getattr(obj, c).isoformat()) if isinstance(getattr(obj, c), datetime) else (c, getattr(obj, c))
out = dict(map(get_key_value, columns))
for name, relation in mapper.relationships.items():
if relation not in found:
found.add(relation)
related_obj = getattr(obj, name)
if related_obj is not None:
if relation.uselist:
out[name] = [object_to_dict(child, found) for child in related_obj]
else:
out[name] = object_to_dict(related_obj, found)
return out
@main.command(name='wip')
def wip():
u = User.query.get(1)
pprint(object_to_dict(u))
@main.group(name='db')
def database():
pass
@database.command()
def create():
db.create_all()
print("Tables created")
@database.command()
def drop():
db.drop_all()
print("Tables Dropped")
@database.command()
def reset():
db.drop_all()
print("Tables Dropped")
db.create_all()
print("Tables created")
apply_fixtures()
@database.command()
def fixtures():
apply_fixtures()
def apply_fixtures():
#
# User and roles
#
role_admin = Role(name='admin')
role_user = Role(name='user')
saf = User(name='saf', email='[email protected]', password=encrypt_password('oracle'), active=True)
batman = User(name='batman', email='[email protected]', password=encrypt_password('oracle'))
wonderwoman = User(name='wonderwoman', email='[email protected]', password=encrypt_password('oracle'), active=True)
db.session.add_all([role_admin, role_user, saf, batman, wonderwoman])
role_admin.users.extend([saf, batman])
role_user.users.extend([saf, batman, wonderwoman])
#
# Releases
#
upstream = Release(name='upstream', description='Upstream Release, that points to current', release_date=datetime(2000, 1, 1))
tardis = Release(name='tardis', release_date=datetime(2014, 6, 18))
acilaris = Release(name='acilaris', description=LOREM, release_date=datetime(2015, 5, 1))
abwa_2016_01 = Release(name='abwa-2016-01', description="Release Abwa 2016-01", release_date=datetime(2016, 6, 1))
db.session.add_all([upstream, tardis, acilaris, abwa_2016_01])
db.session.flush() # Necessary so the foreign key constraints are fullfilled
#
# Repositories
#
upstream_epel_el_6 = Repository(release_id=upstream.id, name='epel-el6', description="Enterprise Linux Enhanced Packages for EL 6",
behaviour=REPOSITORY_BEHAVIOUR_MANUAL, updated_at=datetime(2016, 10, 22, 12, 00))
upstream_rhel_6_updates = Repository(release_id=upstream.id, name='rhel-updates-el6',
behaviour=REPOSITORY_BEHAVIOUR_MANUAL, updated_at=datetime(2016, 10, 23, 13, 00))
upstream_se2ve = Repository(release_id=upstream.id, name='se2ve',
behaviour=REPOSITORY_BEHAVIOUR_MANUAL, updated_at=datetime(2016, 10, 22, 14, 00))
upstream_docker = Repository(release_id=upstream.id, name='docker-el6', behaviour=REPOSITORY_BEHAVIOUR_MANUAL)
db.session.add_all([upstream_epel_el_6, upstream_rhel_6_updates, upstream_se2ve, upstream_docker])
tardis_epel_el_6 = Repository(release_id=tardis.id, name='epel-el6', description="Enterprise Linux Enhanced Packages for EL 6",
behaviour=REPOSITORY_BEHAVIOUR_COPY, updated_at=datetime(2016, 10, 25, 12, 00))
tardis_epel_el_6.set_origin(upstream_epel_el_6)
tardis_rhel_6_updates = Repository(release_id=tardis.id, name='rhel-updates-el6',
behaviour=REPOSITORY_BEHAVIOUR_COPY, updated_at=datetime(2016, 10, 25, 11, 00))
tardis_rhel_6_updates.set_origin(upstream_rhel_6_updates)
tardis_se2ve = Repository(release_id=tardis.id, name='se2ve', behaviour=REPOSITORY_BEHAVIOUR_LINK)
tardis_se2ve.set_origin(upstream_se2ve)
tardis_docker = Repository(release_id=tardis.id, name='docker-el6',
behaviour=REPOSITORY_BEHAVIOUR_MANUAL, updated_at=datetime(2016, 10, 25, 13, 00))
db.session.add_all([tardis_epel_el_6, tardis_rhel_6_updates, tardis_se2ve, tardis_docker])
upstream_se2ve.set_users([saf])
db.session.commit()
print("Fixtures applied")
| {
"content_hash": "047d813b098112647b780bd1057211a5",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 135,
"avg_line_length": 37.73684210526316,
"alnum_prop": 0.6577754532775453,
"repo_name": "sumpfgottheit/arps",
"id": "13b78e879242078e30ccdec99569dc7424c5088a",
"size": "5736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arps/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1069773"
},
{
"name": "HTML",
"bytes": "62088"
},
{
"name": "JavaScript",
"bytes": "649216"
},
{
"name": "Python",
"bytes": "90610"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
} |
from .abstract_db import AbstractDatabaseTask
class BackupTask(AbstractDatabaseTask):
"""
Backs up the database.
"""
name = "backup"
def run(self, filename):
return self.postgres(
"pg_dump -Fc %s > %s" % (self.env.proj_name, filename))
| {
"content_hash": "1ba72efeb81b44f616ebb1b3170b63c0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 23.25,
"alnum_prop": 0.6164874551971327,
"repo_name": "Numerical-Brass/Wool",
"id": "b39102c68fa8237856d5d48f63937698fc56a804",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/deploy/db/backup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31552"
},
{
"name": "Shell",
"bytes": "6455"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class MetricValue(Model):
"""Represents a metric value.
All required parameters must be populated in order to send to Azure.
:param time_stamp: Required. the timestamp for the metric value in ISO
8601 format.
:type time_stamp: datetime
:param average: the average value in the time range.
:type average: float
:param minimum: the least value in the time range.
:type minimum: float
:param maximum: the greatest value in the time range.
:type maximum: float
:param total: the sum of all of the values in the time range.
:type total: float
:param count: the number of samples in the time range. Can be used to
determine the number of values that contributed to the average value.
:type count: long
"""
_validation = {
'time_stamp': {'required': True},
}
_attribute_map = {
'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
'average': {'key': 'average', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'total': {'key': 'total', 'type': 'float'},
'count': {'key': 'count', 'type': 'long'},
}
def __init__(self, *, time_stamp, average: float=None, minimum: float=None, maximum: float=None, total: float=None, count: int=None, **kwargs) -> None:
super(MetricValue, self).__init__(**kwargs)
self.time_stamp = time_stamp
self.average = average
self.minimum = minimum
self.maximum = maximum
self.total = total
self.count = count
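# Usage sketch (illustrative only, not part of the generated client): the model
# is a plain data holder, so it can be constructed directly with keyword
# arguments; only `time_stamp` is required by the validation rules above.
if __name__ == '__main__':
    import datetime
    _sample = MetricValue(
        time_stamp=datetime.datetime(2018, 1, 1, 12, 0),
        average=42.0,
        minimum=40.0,
        maximum=44.0,
        total=420.0,
        count=10,
    )
    print(_sample.time_stamp, _sample.average, _sample.count)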
| {
"content_hash": "3e319c73e6aef59fddbf1f9d6d972f25",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 155,
"avg_line_length": 36.44444444444444,
"alnum_prop": 0.6146341463414634,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "c6c65186df2547ed8c3609a5a6f10a29c05f8b76",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/metric_value_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""
Support for Yr.no weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.yr/
"""
import asyncio
import logging
from random import randrange
from xml.parsers.expat import ExpatError
import aiohttp
import async_timeout
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_ELEVATION, CONF_MONITORED_CONDITIONS,
ATTR_ATTRIBUTION, CONF_NAME)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_track_utc_time_change,
async_call_later)
from homeassistant.util import dt as dt_util
REQUIREMENTS = ['xmltodict==0.11.0']
_LOGGER = logging.getLogger(__name__)
CONF_ATTRIBUTION = "Weather forecast from met.no, delivered " \
"by the Norwegian Meteorological Institute."
# https://api.met.no/license_data.html
SENSOR_TYPES = {
'symbol': ['Symbol', None],
'precipitation': ['Precipitation', 'mm'],
'temperature': ['Temperature', '°C'],
'windSpeed': ['Wind speed', 'm/s'],
'windGust': ['Wind gust', 'm/s'],
'pressure': ['Pressure', 'hPa'],
'windDirection': ['Wind direction', '°'],
'humidity': ['Humidity', '%'],
'fog': ['Fog', '%'],
'cloudiness': ['Cloudiness', '%'],
'lowClouds': ['Low clouds', '%'],
'mediumClouds': ['Medium clouds', '%'],
'highClouds': ['High clouds', '%'],
'dewpointTemperature': ['Dewpoint temperature', '°C'],
}
CONF_FORECAST = 'forecast'
DEFAULT_FORECAST = 0
DEFAULT_NAME = 'yr'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ELEVATION): vol.Coerce(int),
vol.Optional(CONF_FORECAST, default=DEFAULT_FORECAST): vol.Coerce(int),
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_MONITORED_CONDITIONS, default=['symbol']):
vol.All(cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
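# Illustrative only (not used by the integration): a config mapping of the kind
# PLATFORM_SCHEMA above validates for this platform; the values are example
# placeholders, not defaults.
#   {'platform': 'yr',
#    'monitored_conditions': ['symbol', 'temperature', 'windSpeed'],
#    'forecast': 24}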
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Yr.no sensor."""
elevation = config.get(CONF_ELEVATION, hass.config.elevation or 0)
forecast = config.get(CONF_FORECAST)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config.get(CONF_NAME)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
coordinates = {
'lat': str(latitude),
'lon': str(longitude),
'msl': str(elevation),
}
dev = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
dev.append(YrSensor(name, sensor_type))
async_add_entities(dev)
weather = YrData(hass, coordinates, forecast, dev)
async_track_utc_time_change(hass, weather.updating_devices,
minute=31, second=0)
await weather.fetching_data()
class YrSensor(Entity):
"""Representation of an Yr.no sensor."""
def __init__(self, name, sensor_type):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def entity_picture(self):
"""Weather symbol if type is symbol."""
if self.type != 'symbol':
return None
return "https://api.met.no/weatherapi/weathericon/1.1/" \
"?symbol={0};content_type=image/png".format(self._state)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
}
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
class YrData:
"""Get the latest data and updates the states."""
def __init__(self, hass, coordinates, forecast, devices):
"""Initialize the data object."""
self._url = 'https://aa015h6buqvih86i1.api.met.no/'\
'weatherapi/locationforecast/1.9/'
self._urlparams = coordinates
self._forecast = forecast
self.devices = devices
self.data = {}
self.hass = hass
async def fetching_data(self, *_):
"""Get the latest data from yr.no."""
import xmltodict
def try_again(err: str):
"""Retry in 15 to 20 minutes."""
minutes = 15 + randrange(6)
_LOGGER.error("Retrying in %i minutes: %s", minutes, err)
async_call_later(self.hass, minutes*60, self.fetching_data)
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(10, loop=self.hass.loop):
resp = await websession.get(
self._url, params=self._urlparams)
if resp.status != 200:
try_again('{} returned {}'.format(resp.url, resp.status))
return
text = await resp.text()
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
try_again(err)
return
try:
self.data = xmltodict.parse(text)['weatherdata']
except (ExpatError, IndexError) as err:
try_again(err)
return
await self.updating_devices()
async_call_later(self.hass, 60*60, self.fetching_data)
async def updating_devices(self, *_):
"""Find the current data from self.data."""
if not self.data:
return
now = dt_util.utcnow()
forecast_time = now + dt_util.dt.timedelta(hours=self._forecast)
# Find the correct time entry. Since not all time entries contain all
# types of data, we cannot just select one. Instead, we order them by
# distance from the desired forecast_time, and for every device iterate
# them in order of increasing distance, taking the first time_point
# that contains the desired data.
ordered_entries = []
for time_entry in self.data['product']['time']:
valid_from = dt_util.parse_datetime(time_entry['@from'])
valid_to = dt_util.parse_datetime(time_entry['@to'])
if now >= valid_to:
# Has already passed. Never select this.
continue
average_dist = (abs((valid_to - forecast_time).total_seconds()) +
abs((valid_from - forecast_time).total_seconds()))
ordered_entries.append((average_dist, time_entry))
ordered_entries.sort(key=lambda item: item[0])
# Update all devices
tasks = []
if ordered_entries:
for dev in self.devices:
new_state = None
for (_, selected_time_entry) in ordered_entries:
loc_data = selected_time_entry['location']
if dev.type not in loc_data:
continue
if dev.type == 'precipitation':
new_state = loc_data[dev.type]['@value']
elif dev.type == 'symbol':
new_state = loc_data[dev.type]['@number']
elif dev.type in ('temperature', 'pressure', 'humidity',
'dewpointTemperature'):
new_state = loc_data[dev.type]['@value']
elif dev.type in ('windSpeed', 'windGust'):
new_state = loc_data[dev.type]['@mps']
elif dev.type == 'windDirection':
new_state = float(loc_data[dev.type]['@deg'])
elif dev.type in ('fog', 'cloudiness', 'lowClouds',
'mediumClouds', 'highClouds'):
new_state = loc_data[dev.type]['@percent']
break
# pylint: disable=protected-access
if new_state != dev._state:
dev._state = new_state
tasks.append(dev.async_update_ha_state())
if tasks:
await asyncio.wait(tasks, loop=self.hass.loop)
| {
"content_hash": "8756cb83fb2072e00433d802b61b0a35",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 35.00389105058366,
"alnum_prop": 0.5840373499333037,
"repo_name": "PetePriority/home-assistant",
"id": "0cb9c3765ecab51c8c31a3c7a560f7b8275a786c",
"size": "8999",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/yr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from prompt_toolkit.shortcuts import get_input
from prompt_toolkit.filters import Always
if __name__ == '__main__':
print('If you press meta-! or esc-! at the following prompt, you can enter system commands.')
answer = get_input('Give me some input: ', enable_system_bindings=Always())
print('You said: %s' % answer)
| {
"content_hash": "5e9182e820e34e55778b195a89e850db",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 97,
"avg_line_length": 41.22222222222222,
"alnum_prop": 0.7008086253369272,
"repo_name": "jaseg/python-prompt-toolkit",
"id": "0499f45dfd26116303eb740054be5deb8d99f11c",
"size": "393",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/system-prompt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "558512"
}
],
"symlink_target": ""
} |
__author__ = 'johannes'
from flask import render_template, jsonify, url_for
from devviz import data_handler, app
from devviz.utils import sse_route
from devviz.views import View, Variable
import json
import time
@app.route('/variables/stream')
@sse_route
def variables_stream():
while True:
vars = [{"name": var,
"type": data_handler.get_type(var)}
for var in data_handler.get_variables()]
yield json.dumps({"data": vars})
time.sleep(.1)
@app.route('/variables')
def variables():
vars = [{"name": var,
"type": data_handler.get_type(var)}
for var in data_handler.get_variables()]
return jsonify({"data": vars})
class VariablesView(View):
with app.app_context():
script = ('<script src="https://cdnjs.cloudflare.com/ajax/libs/react/'
'0.13.3/react.js"></script>\n'
'<script src="{}"></script>\n'
.format(url_for('static', filename='js/variablesview.js')))
url = 'variables'
name = 'Variables'
def __init__(self, variables=None, viewid=None):
super(VariablesView, self).__init__(variables, viewid)
@property
def content(self):
variables = data_handler.get_variables()
vars = [Variable(name=var, type=data_handler.get_type(var))
for var in variables]
return render_template("variables.html", variables=vars,
viewid=self.viewid)
app.views[VariablesView.url] = VariablesView
| {
"content_hash": "580cbfd8e2beb5a5c0adbfd4dfab43b3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 30.176470588235293,
"alnum_prop": 0.5997400909681612,
"repo_name": "hildensia/devviz",
"id": "d751d3cf9f41e91d8edfa2e9e621902ebf32ba12",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devviz/views/variables.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7248"
},
{
"name": "CSS",
"bytes": "12878"
},
{
"name": "HTML",
"bytes": "8610"
},
{
"name": "JavaScript",
"bytes": "167165"
},
{
"name": "Makefile",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "26797"
},
{
"name": "Shell",
"bytes": "129"
}
],
"symlink_target": ""
} |
import urllib.request
#first run
#num = "12345"
#second part
num = str(int(92118/2))
url = "http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing="
for x in range(400):
page = urllib.request.urlopen(url + str(num))
mystr = page.read().decode()
parts = mystr.split()
num = parts[len(parts)-1]
print(num)
if not num.isdigit():
break
print("Final: " + mystr)
| {
"content_hash": "09adaef6b34b96c4c4d0f97e0433d630",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 18.636363636363637,
"alnum_prop": 0.6195121951219512,
"repo_name": "feliposz/python-challenge-solutions",
"id": "1b4bd3cfa3d81b2e237d9c6693abc6898b696d76",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "level4-linkedlist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6436"
},
{
"name": "OpenEdge ABL",
"bytes": "10004"
},
{
"name": "Python",
"bytes": "293733"
}
],
"symlink_target": ""
} |
from sqlalchemy import (
Column,
ForeignKey,
Integer,
String,
Enum,
Boolean,
Date,
Table
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from marshmallow_sqlalchemy import ModelSchema
Base = declarative_base()
TimeOfDayEnum = (
'Morning',
'Afternoon',
)
BirdEnum = (
'American Wigeon',
'Blue-winged Teal',
'Bufflehead',
'Canada',
'Canvasback',
'Common Goldeneye',
'Gadwall',
'Green-winged Teal',
'Greater Scaup',
'Hooded Merganser',
'Lesser Scaup',
'Redhead',
'Ring-necked Duck',
'Mallard',
'Northern Pintail',
'Northern Shoveler',
'Wood Duck'
)
BirdGenderEnum = (
'Drake',
'Hen',
'Unknown'
)
class Hunt(Base):
__tablename__ = 'hunt'
id = Column(Integer, primary_key=True)
date = Column(Date, nullable=False)
location = Column(String(50), nullable=False)
timeofday = Column(Enum(*TimeOfDayEnum), nullable=False)
hunters = relationship('Hunter', secondary='hunt_hunter')
birds = relationship('Bird', backref='hunt',
cascade='all, delete, delete-orphan')
def __repr__(self):
return '<Hunt(id=%s, date=%s, location=%s, timeofday=%s)>'\
% (self.id, self.date, self.location, self.timeofday)
class Hunter(Base):
__tablename__ = 'hunter'
id = Column(Integer, primary_key=True)
firstname = Column(String(20), nullable=False)
lastname = Column(String(50), nullable=False)
def __repr__(self):
return '<Hunter(id=%s, firstname=%s, lastname=%s)>'\
% (self.id, self.firstname, self.lastname)
class HuntHunter(Base):
__tablename__ = 'hunt_hunter'
hunt_id = Column(Integer, ForeignKey('hunt.id'), primary_key=True)
hunter_id = Column(Integer, ForeignKey('hunter.id'), primary_key=True)
hunter = relationship('Hunter', backref='hunt_associations')
hunt = relationship('Hunt', backref='hunter_associations')
def __repr__(self):
return '<HuntHunter(hunt_id=%s, hunter_id=%s)>'\
% (self.hunt_id, self.hunter_id)
class Bird(Base):
__tablename__ = 'bird'
id = Column(Integer, primary_key=True)
species = Column(Enum(*BirdEnum), nullable=False)
gender = Column(Enum(*BirdGenderEnum), nullable=False)
banded = Column(Boolean, default=False)
lost = Column(Boolean, default=False)
mounted = Column(Boolean, default=False)
Hunt_id = Column(Integer, ForeignKey('hunt.id'))
def __repr__(self):
return '<Bird(id=%s, species=%s, gender=%s,\
banded=%s, lost=%s, mounted=%s)>'\
% (self.id, self.species, self.gender,
self.banded, self.lost, self.mounted)
class HuntSchema(ModelSchema):
class Meta:
model = Hunt
class HunterSchema(ModelSchema):
class Meta:
model = Hunter
class HuntHunterSchema(ModelSchema):
class Meta:
model = HuntHunter
class BirdSchema(ModelSchema):
class Meta:
model = Bird
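# Usage sketch (illustrative, not part of the application): wire the models to
# an in-memory SQLite database and serialize a Hunt. The values below are made
# up, and depending on the marshmallow version ``dump`` returns either a plain
# dict or a result object with ``.data``.
def _demo_hunt_roundtrip():
    from datetime import date
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    hunt = Hunt(date=date(2017, 10, 21), location='Rice Lake', timeofday='Morning')
    hunt.birds.append(Bird(species='Mallard', gender='Drake'))
    session.add(hunt)
    session.commit()
    return HuntSchema().dump(hunt)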
| {
"content_hash": "ba17df652c39ef7bb553e19002d01d50",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 74,
"avg_line_length": 25.073170731707318,
"alnum_prop": 0.6167315175097277,
"repo_name": "bschuweiler/hunting-journal",
"id": "d0b266072128ec4d9be5384ed9e4c4b1f70aa44f",
"size": "3084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chalicelib/orm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11197"
},
{
"name": "JavaScript",
"bytes": "5235"
},
{
"name": "Python",
"bytes": "15486"
}
],
"symlink_target": ""
} |
__author__ = 'fmoscato'
"""
The Publication DAO handles interactions with the publication collection.
The DAO provides a three-level interface (description to be improved):
1 - ADMIN can add publications + validate
2 - search publications
3 - user level
"""
import sys
import re
from datetime import datetime
import json
import ast
import pymongo
import bson
import constants as c
class PublicationDAO(object):
def __init__(self, database):
self.db = database
self.publications = database.publications
def add_publication(self, **kwargs):
"""
        Insert a publication.
        param: **kwargs
        return: (True, error) where error['general'] holds any failure message
"""
        # This is done for the non-refereed publications, which can sometimes
        # have 00 instead of a valid month.
error = {'general': ''}
if 'publication_obj' in kwargs.keys():
publication = kwargs['publication_obj'].__dict__
else:
pub_date = datetime.strptime(kwargs["pub_date"], c.SHORT_DATE_FORMAT)
# Build a new publication
publication = { "title": unicode(kwargs["title"], 'utf-8', 'ignore'),
"author": kwargs["author"].decode('utf8', 'ignore'),
"authors_to_show": kwargs["authors_to_show"],
"ASI_authors": kwargs["ASI_authors"],
"project_mission": kwargs["project_mission"],
"project": kwargs.get("project", ""),
"contracts": kwargs.get("contracts", []),
"pub_date": pub_date,
"abstract": unicode(kwargs["abstract"], 'utf-8', 'ignore'),
"keyword": kwargs["keyword"],
"link": [kwargs["link"]],
"journal": kwargs["journal"],
"number": kwargs.get("number", ""),
"volume": kwargs.get("volume", ""),
"pages": kwargs.get("pages", ""),
"series": kwargs.get("series", ""),
"type": kwargs.get("type", 'Article Journal'),
"doi": kwargs.get("doi"),
"issn": kwargs.get("issn"),
"isbn": kwargs.get("isbn"),
"publisher": kwargs.get("publisher"),
"booktitle": kwargs.get("booktitle"),
"eventname": kwargs.get("eventname"),
'academic_year': kwargs.get("academic_year"),
'university': kwargs.get("university"),
'code': kwargs.get("code"),
"note": kwargs.get("note", "")
}
publication = self.clean_publication_dictionary(publication)
# now insert the publication
try:
self.publications.insert(publication)
except pymongo.errors.DuplicateKeyError:
error['general'] = "Publication %s already in the DB" % publication["title"]
except pymongo.errors.OperationFailure:
error['general'] = "Error inserting publication %s: %s" % (publication["title"], sys.exc_info()[0])
return True, error
def remove_publication(self, _id):
try:
print "removing publication %s" % _id
self.publications.remove({'_id': bson.ObjectId(_id)})
except pymongo.errors.OperationFailure:
print "oops, mongo error"
return False
return True
    # Return an array of publications, sorted descending by pub_date, with dates in short format.
def get_publications(self, **kwargs):
pub, cursor = [], None
query = {}
sort_score = False
options = {}
kwargs_keys = kwargs.keys()
if set(['type']).issubset(kwargs_keys):
query['type'] = kwargs['type']
if set(['start_date', 'end_date']).issubset(kwargs_keys):
query['pub_date'] = {'$gte': kwargs['start_date'], '$lte': kwargs['end_date']}
if set(['authors', 'condition_authors']).issubset(kwargs_keys):
condition = kwargs['condition_authors']
if condition == 'OR':
query['author'] = {'$in': [re.compile(aut, re.IGNORECASE) for aut in kwargs['authors']]}
elif condition == 'AND':
query['$and'] = [dict(author={'$regex': auth, '$options': 'i'})for auth in kwargs['authors']]
#search for single author
if set(['author']).issubset(kwargs_keys):
query['ASI_authors'] = kwargs['author']
if set(['title']).issubset(kwargs_keys) and kwargs['title'].strip():
# title has the text index on it
query['$text'] = {'$search': kwargs['title']}
options['score'] = {'$meta': "textScore"}
sort_score = True
if set(['projects_missions', 'condition_category']).issubset(kwargs_keys):
condition = kwargs['condition_category']
if condition == 'OR':
query['project_mission'] = {'$in': [re.compile(p_m, re.IGNORECASE)for p_m in kwargs['projects_missions']]}
elif condition == 'AND':
query['$and'] = [dict(project_mission={'$regex': p_m, '$options': 'i'})for p_m in kwargs['projects_missions']]
if set(['projects', 'condition_projects']).issubset(kwargs_keys):
condition = kwargs['condition_projects']
if condition == 'OR':
query['project'] = {'$in': [re.compile(p, re.IGNORECASE) for p in kwargs['projects']]}
elif condition == 'AND':
query['$and'] = [dict(project={'$regex': p, '$options': 'i'})for p in kwargs['projects']]
if set(['type', 'condition_type']).issubset(kwargs_keys):
condition = kwargs['condition_type']
if condition == 'OR':
query['type'] = {'$in': [re.compile(t, re.IGNORECASE) for t in kwargs['type']]}
elif condition == 'AND':
query['$and'] = [dict(type={'$regex': t, '$options': 'i'})for t in kwargs['type']]
if set(['doi']).issubset(kwargs_keys):
query['doi'] = {'$regex': kwargs['doi'], '$options': 'i'}
if set(['issn']).issubset(kwargs_keys):
query['issn'] = {'$regex': kwargs['issn'], '$options': 'i'}
if set(['isbn']).issubset(kwargs_keys):
query['isbn'] = {'$regex': kwargs['isbn'], '$options': 'i'}
if set(['keywords', 'condition_keywords']).issubset(kwargs_keys):
condition = kwargs['condition_keywords']
if condition == 'OR':
query['keyword'] = {'$in': [re.compile(k, re.IGNORECASE) for k in kwargs['keywords']]}
elif condition == 'AND':
query['$and'] = [dict(keyword={'$regex': k, '$options': 'i'})for k in kwargs['keywords']]
try:
if sort_score:
cursor = self.publications.find(query, options)
cursor.sort([('score', {'$meta': 'textScore'})])
else:
cursor = self.publications.find(query).sort("pub_date", pymongo.DESCENDING)
except pymongo.errors.OperationFailure:
print "oops, mongo error"
return False
for publication in cursor:
pub.append({'id': publication['_id'],
'title': publication['title'],
'authors': publication['authors_to_show'],
'pub_date': publication['pub_date'].strftime(c.SHORT_DATE_FORMAT),
'type': publication["type"],
'ASI_authors': publication.get('ASI_authors', ''),
'project_mission': publication.get('project_mission', ''),
'project': publication.get('project', '')
})
return pub
def update_categories(self, _id, categories_list):
try:
self.publications.update({'_id': bson.ObjectId(_id)},
{'$set': {'category': categories_list}})
except pymongo.errors.OperationFailure:
print "Mongo error, updating category in publication %s" % _id
return False
return True
def update_projects(self, _id, projects_list):
try:
self.publications.update({'_id': bson.ObjectId(_id)},
{'$set': {'project': projects_list}})
except pymongo.errors.OperationFailure:
print "Mongo error, updating project in publication %s" % _id
return False
return True
def update_contracts(self, _id, contract_list):
try:
self.publications.update({'_id': bson.ObjectId(_id)},
{'$set': {'contract': contract_list}})
except pymongo.errors.OperationFailure:
print "Mongo error, updating contracts in publication %s" % _id
return False
return True
def update_publication(self, publication):
try:
_id = publication['_id']
            # The _id has to be removed first, otherwise the update will not work.
del publication['_id']
publication = self.clean_publication_dictionary(publication)
self.publications.update({'_id': bson.ObjectId(_id)},
publication)
except pymongo.errors.OperationFailure:
print "Mongo error, updating the whole publication %s" % _id
return False
return True
def get_publication_id(self, id):
return self.publications.find_one({'_id': bson.ObjectId(id)})
def get_publications_type(self):
r = self.publications.distinct('type')
        # Quick and dirty way to convert unicode to str.
return ast.literal_eval(json.dumps(r))
def clean_publication_dictionary(self, pub_to_save):
"""
remove all empty fields from the dictionary before to save in DB
:param pub_to_save:
:return: dictionary clean
"""
return (dict((k, v) for k, v in pub_to_save.items() if v))
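# Usage sketch (illustrative, not part of the module): the DAO only needs a
# pymongo database handle; collections and indexes are assumed to be created
# elsewhere. The author name and date range below are made up.
def _example_search(database):
    dao = PublicationDAO(database)
    return dao.get_publications(author='Rossi',
                                start_date=datetime(2015, 1, 1),
                                end_date=datetime(2015, 12, 31))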
| {
"content_hash": "916cad48ca98cc013448c640bc2f845d",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 126,
"avg_line_length": 34.324503311258276,
"alnum_prop": 0.5213196990160139,
"repo_name": "lbmm/S.E.Arch",
"id": "c3d8783419d9186eaee8faa198462fe350174e8d",
"size": "10366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pubblicazioniASI/publicationDAO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36272"
},
{
"name": "JavaScript",
"bytes": "501527"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Python",
"bytes": "131809"
},
{
"name": "Shell",
"bytes": "101"
},
{
"name": "Smarty",
"bytes": "171261"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`kubernetes.client.models.V1VolumeMount`."""
import warnings
with warnings.catch_warnings():
from airflow.providers.cncf.kubernetes.backcompat.volume_mount import VolumeMount # noqa: autoflake
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models.V1VolumeMount`.",
DeprecationWarning,
stacklevel=2,
)
| {
"content_hash": "5f76df2ecf90f57bd1c5139edb87d336",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 104,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.7653061224489796,
"repo_name": "lyft/incubator-airflow",
"id": "aff5f30d5840e5200362bc88146f8ae2462e1200",
"size": "1179",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/kubernetes/volume_mount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
import os
import bitcoin
import keystore
from keystore import bip44_derivation
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
def __init__(self, config, storage):
super(BaseWizard, self).__init__()
self.config = config
self.storage = storage
self.wallet = None
self.stack = []
self.plugin = None
self.keystores = []
self.is_kivy = config.get('gui') == 'kivy'
self.seed_type = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
f(self, *args)
elif hasattr(self, action):
f = getattr(self, action)
f(*args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Create") + ' ' + name.decode('utf8')
message = '\n'.join([
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Vertcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def load_2fa(self):
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.load_2fa()
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
i = len(self.keystores)
title = _('Add cosigner') + ' (%d of %d)'%(i+1, self.n) if self.wallet_type=='multisig' else _('Keystore')
if self.wallet_type =='standard' or i==0:
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_from_seed', _('I already have a seed')),
('restore_from_key', _('Use public or private keys')),
]
#if not self.is_kivy:
# choices.append(('choose_hw_device', _('Use a hardware device')))
else:
message = _('Add a cosigner to your multi-sig wallet')
choices = [
('restore_from_key', _('Enter cosigner key')),
('restore_from_seed', _('Enter cosigner seed')),
]
#if not self.is_kivy:
# choices.append(('choose_hw_device', _('Cosign with hardware device')))
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Vertcoin Addresses")
message = _("Enter a list of Vertcoin addresses. This will create a watching-only wallet.")
self.add_xpub_dialog(title=title, message=message, run_next=self.on_import_addresses, is_valid=v)
def on_import_addresses(self, text):
assert keystore.is_address_list(text)
self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate()
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Create keystore from keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Vertcoin private keys.")
])
self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
else:
v = keystore.is_bip32_key
i = len(self.keystores) + 1
self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=v)
def on_restore_from_key(self, text):
k = keystore.from_keys(text)
self.on_keystore(k)
def choose_hw_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device())
return
# scan devices
devices = []
devmgr = self.plugins.device_manager
for name, description, plugin in support:
try:
# FIXME: side-effect: unpaired_device_info sets client.handler
u = devmgr.unpaired_device_infos(None, plugin)
except:
devmgr.print_error("error", name)
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = ''.join([
_('No hardware device detected.') + '\n',
_('To trigger a rescan, press \'Next\'.') + '\n\n',
_('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", and do "Remove device". Then, plug your device again.') + ' ',
_('On Linux, you might have to add a new permission to your udev rules.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device())
return
# select device
self.devices = devices
choices = []
for name, info in devices:
state = _("initialized") if info.initialized else _("wiped")
label = info.label or _("An unnamed %s")%name
descr = "%s [%s, %s]" % (label, name, state)
choices.append(((name, info), descr))
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
self.plugin = self.plugins.get_plugin(name)
try:
self.plugin.setup_device(device_info, self)
except BaseException as e:
self.show_error(str(e))
self.choose_hw_device()
return
if self.wallet_type=='multisig':
# There is no general standard for HD multisig.
# This is partially compatible with BIP45; assumes index=0
self.on_hw_derivation(name, device_info, "m/45'/0")
else:
f = lambda x: self.run('on_hw_derivation', name, device_info, str(x))
self.derivation_dialog(f)
def derivation_dialog(self, f):
default = bip44_derivation(0)
message = '\n'.join([
_('Enter your wallet derivation here.'),
_('If you are not sure what this is, leave this field unchanged.')
])
self.line_dialog(run_next=f, title=_('Derivation'), message=message, default=default, test=bitcoin.is_bip32_derivation)
def on_hw_derivation(self, name, device_info, derivation):
from keystore import hardware_keystore
xpub = self.plugin.get_xpub(device_info.device.id_, derivation, self)
if xpub is None:
self.show_error('Cannot read xpub from device')
return
d = {
'type': 'hardware',
'hw_type': name,
'derivation': derivation,
'xpub': xpub,
'label': device_info.label,
}
k = hardware_keystore(d)
self.on_keystore(k)
def passphrase_dialog(self, run_next):
title = _('Seed extension')
message = '\n'.join([
_('You may extend your seed with custom words.'),
_('Your seed extension must be saved together with your seed.'),
])
warning = '\n'.join([
_('Note that this is NOT your encryption password.'),
_('If you do not know what this is, leave this field empty.'),
])
self.line_dialog(title=title, message=message, warning=warning, default='', test=lambda x:True, run_next=run_next)
def restore_from_seed(self):
self.opt_bip39 = True
self.opt_ext = True
test = bitcoin.is_seed if self.wallet_type == 'standard' else bitcoin.is_new_seed
self.restore_seed_dialog(run_next=self.on_restore_seed, test=test)
def on_restore_seed(self, seed, is_bip39, is_ext):
self.seed_type = 'bip39' if is_bip39 else bitcoin.seed_type(seed)
if self.seed_type == 'bip39':
f = lambda passphrase: self.on_restore_bip39(seed, passphrase)
self.passphrase_dialog(run_next=f) if is_ext else f('')
elif self.seed_type in ['standard', 'segwit']:
f = lambda passphrase: self.run('create_keystore', seed, passphrase)
self.passphrase_dialog(run_next=f) if is_ext else f('')
elif self.seed_type == 'old':
self.run('create_keystore', seed, '')
elif self.seed_type == '2fa':
if self.is_kivy:
self.show_error('2FA seeds are not supported in this version')
self.run('restore_from_seed')
else:
self.load_2fa()
self.run('on_restore_seed', seed, is_ext)
else:
            raise BaseException('Unknown seed type', self.seed_type)
def on_restore_bip39(self, seed, passphrase):
f = lambda x: self.run('on_bip44', seed, passphrase, str(x))
self.derivation_dialog(f)
def create_keystore(self, seed, passphrase):
k = keystore.from_seed(seed, passphrase)
self.on_keystore(k)
def on_bip44(self, seed, passphrase, derivation):
k = keystore.BIP32_KeyStore({})
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
k.add_xprv_from_seed(bip32_seed, 0, derivation)
self.on_keystore(k)
def on_keystore(self, k):
if self.wallet_type == 'standard':
self.keystores.append(k)
self.run('create_wallet')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
self.show_error(_('Error: duplicate master public key'))
self.run('choose_keystore')
return
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
self.run('create_wallet')
def create_wallet(self):
if any(k.may_have_password() for k in self.keystores):
self.request_password(run_next=self.on_password)
else:
self.on_password(None, False)
def on_password(self, password, encrypt):
self.storage.set_password(password, encrypt)
for k in self.keystores:
if k.may_have_password():
k.update_password(None, password)
if self.wallet_type == 'standard':
self.storage.put('seed_type', self.seed_type)
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_keys(text, password)
self.on_keystore(k)
def create_seed(self):
import mnemonic
self.seed_type = 'segwit' if bitcoin.TESTNET and self.config.get('segwit') else 'standard'
seed = mnemonic.Mnemonic('en').make_seed(self.seed_type)
self.opt_bip39 = False
f = lambda x: self.request_passphrase(seed, x)
self.show_seed_dialog(run_next=f, seed_text=seed)
def request_passphrase(self, seed, opt_passphrase):
if opt_passphrase:
f = lambda x: self.confirm_seed(seed, x)
self.passphrase_dialog(run_next=f)
else:
self.run('confirm_seed', seed, '')
def confirm_seed(self, seed, passphrase):
f = lambda x: self.confirm_passphrase(seed, passphrase)
self.confirm_seed_dialog(run_next=f, test=lambda x: x==seed)
def confirm_passphrase(self, seed, passphrase):
f = lambda x: self.run('create_keystore', seed, x)
if passphrase:
title = _('Confirm Seed Extension')
message = '\n'.join([
_('Your seed extension must be saved together with your seed.'),
_('Please type it here.'),
])
self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x==passphrase)
else:
f('')
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg)
| {
"content_hash": "effbe19d16e6baac7f68568a6813f4db",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 174,
"avg_line_length": 41.22554347826087,
"alnum_prop": 0.568255223782216,
"repo_name": "vertcoin/electrum-vtc",
"id": "0e647bcf2aa6e4634ea910779009f652b0ca82b6",
"size": "16334",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/base_wizard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Makefile",
"bytes": "840"
},
{
"name": "NSIS",
"bytes": "7164"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1537179"
},
{
"name": "Shell",
"bytes": "8769"
}
],
"symlink_target": ""
} |
import numpy as np
from keras.layers import Lambda, Merge
from keras.layers.convolutional import Convolution2D
from keras import backend as K
from keras.engine import Layer
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
"""
This is the function used for cross channel normalization in the original
Alexnet
"""
def f(X):
b, ch, r, c = X.shape
half = n // 2
square = K.square(X)
        a = K.permute_dimensions(square, (0,2,3,1))
extra_channels = K.spatial_2d_padding(a, (0,half))
extra_channels = K.permute_dimensions(extra_channels, (0,3,1,2))
scale = k
for i in range(n):
scale += alpha * extra_channels[:,i:i+ch,:,:]
scale = scale ** beta
return X / scale
return Lambda(f, output_shape=lambda input_shape:input_shape,**kwargs)
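# Minimal usage sketch (assumes Keras 1.x with the Theano image ordering
# (batch, channels, rows, cols) that the implementation above relies on; the
# layer name and input shape are illustrative):
#   from keras.layers import Input
#   from keras.models import Model
#   inp = Input(shape=(96, 27, 27))
#   out = crosschannelnormalization(name='lrn')(inp)
#   model = Model(input=inp, output=out)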
def splittensor(axis=1, ratio_split=1, id_split=0,**kwargs):
def f(X):
div = X.shape[axis] // ratio_split
if axis == 0:
output = X[id_split*div:(id_split+1)*div,:,:,:]
elif axis == 1:
output = X[:, id_split*div:(id_split+1)*div, :, :]
elif axis == 2:
output = X[:,:,id_split*div:(id_split+1)*div,:]
elif axis == 3:
output = X[:,:,:,id_split*div:(id_split+1)*div]
else:
raise ValueError("This axis is not possible")
return output
def g(input_shape):
output_shape=list(input_shape)
output_shape[axis] = output_shape[axis] // ratio_split
return tuple(output_shape)
return Lambda(f,output_shape=lambda input_shape:g(input_shape),**kwargs)
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
def f(input):
return Merge([
Convolution2D(nb_filter//n_group,nb_row,nb_col)(
splittensor(axis=1,
ratio_split=n_group,
id_split=i)(input))
for i in range(n_group)
],mode='concat',concat_axis=1)
return f
class Softmax4D(Layer):
def __init__(self, axis=-1,**kwargs):
self.axis=axis
super(Softmax4D, self).__init__(**kwargs)
def build(self,input_shape):
pass
def call(self, x,mask=None):
e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
s = K.sum(e, axis=self.axis, keepdims=True)
return e / s
def get_output_shape_for(self, input_shape):
return input_shape
| {
"content_hash": "1a01bc92a019bb277a26be648ba3a5f6",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 29.761904761904763,
"alnum_prop": 0.5676,
"repo_name": "babraham123/deepdriving",
"id": "7c61c7a70ba721cac5427b691fd4688378796f6d",
"size": "2500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "convnetskeras/customlayers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "121"
},
{
"name": "Python",
"bytes": "278717"
},
{
"name": "Shell",
"bytes": "206"
}
],
"symlink_target": ""
} |
'''
InfCommonObject
'''
## InfLineCommentObject
#
# Comment Object for any line in the INF file
#
# #
# # HeaderComment
# #
# Line # TailComment
#
class InfLineCommentObject():
def __init__(self):
self.HeaderComments = ''
self.TailComments = ''
def SetHeaderComments(self, HeaderComments):
self.HeaderComments = HeaderComments
def GetHeaderComments(self):
return self.HeaderComments
def SetTailComments(self, TailComments):
self.TailComments = TailComments
def GetTailComments(self):
return self.TailComments
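## Example (illustrative only): attaching comments parsed from an INF line
#
# LineComment = InfLineCommentObject()
# LineComment.SetHeaderComments('# header comment above the line')
# LineComment.SetTailComments('# tail comment after the line')
#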
## CurrentLine
#
class CurrentLine():
def __init__(self):
self.LineNo = ''
self.LineString = ''
self.FileName = ''
## SetLineNo
#
# @param LineNo: LineNo
#
def SetLineNo(self, LineNo):
self.LineNo = LineNo
## GetLineNo
#
def GetLineNo(self):
return self.LineNo
## SetLineString
#
# @param LineString: Line String content
#
def SetLineString(self, LineString):
self.LineString = LineString
## GetLineString
#
def GetLineString(self):
return self.LineString
## SetFileName
#
# @param FileName: File Name
#
def SetFileName(self, FileName):
self.FileName = FileName
## GetFileName
#
def GetFileName(self):
return self.FileName
##
# Inf Section common data
#
class InfSectionCommonDef():
def __init__(self):
#
# #
# # HeaderComments at here
# #
# [xxSection] TailComments at here
# data
#
self.HeaderComments = ''
self.TailComments = ''
#
# The support arch list of this section
#
self.SupArchList = []
#
# Store all section content
# Key is supported Arch
#
self.AllContent = {}
## SetHeaderComments
#
# @param HeaderComments: HeaderComments
#
def SetHeaderComments(self, HeaderComments):
self.HeaderComments = HeaderComments
## GetHeaderComments
#
def GetHeaderComments(self):
return self.HeaderComments
## SetTailComments
#
# @param TailComments: TailComments
#
def SetTailComments(self, TailComments):
self.TailComments = TailComments
## GetTailComments
#
def GetTailComments(self):
return self.TailComments
## SetSupArchList
#
# @param Arch: Arch
#
def SetSupArchList(self, Arch):
if Arch not in self.SupArchList:
self.SupArchList.append(Arch)
## GetSupArchList
#
def GetSupArchList(self):
return self.SupArchList
## SetAllContent
#
    # @param Content: Content
#
def SetAllContent(self, Content):
self.AllContent = Content
## GetAllContent
#
def GetAllContent(self):
return self.AllContent
| {
"content_hash": "794b3297298f76fad6941143363c415d",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 48,
"avg_line_length": 22.221476510067113,
"alnum_prop": 0.5288432497734823,
"repo_name": "tianocore/buildtools-BaseTools",
"id": "217b0941dac4df5810a7b8c46242da649b734783",
"size": "3921",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Source/Python/UPT/Object/Parser/InfCommonObject.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2774879"
},
{
"name": "C++",
"bytes": "516623"
},
{
"name": "GAP",
"bytes": "374642"
},
{
"name": "Objective-C",
"bytes": "106673"
},
{
"name": "Python",
"bytes": "5675290"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Shell",
"bytes": "49564"
}
],
"symlink_target": ""
} |
"""Setup file.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['tensorflow>=1.12',
'adanet==0.5.0']
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='improve nas model'
)
| {
"content_hash": "72aa42964a40fd18ff02c626f40c8337",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 31.133333333333333,
"alnum_prop": 0.7398286937901499,
"repo_name": "tensorflow/adanet",
"id": "2e349eb3f4529983a46782967ce6c24f83d2ba20",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/improve_nas/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1914501"
},
{
"name": "Python",
"bytes": "1047162"
},
{
"name": "Shell",
"bytes": "2927"
},
{
"name": "Starlark",
"bytes": "28690"
}
],
"symlink_target": ""
} |
import glob
import hashlib
import logging
import os
import re
import shutil
import tempfile
import requests
from platforms.common import ReleaseException, run
from releases import get_version_and_timestamp_from_release
def brew(homebrew_dir, command, *run_args, **run_kwargs):
"""
Run brew that is installed in the specified prefix.
Args:
homebrew_dir: The path containing bin/brew. e.g. /usr/local
command: The list of args to pass to the brew command
run_args: Extra args to send to platforms.common.run
run_kwargs: Extra kwargs to send to platforms.common.run
Returns:
Result from subprocess.run
"""
brew_path = os.path.join(homebrew_dir, "bin", "brew")
return run([brew_path] + command, *run_args, **run_kwargs)
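# Example (illustrative): check that the sandboxed brew works before running
# tap/bottle operations; the prefix path is a placeholder.
#   result = brew('/tmp/buck-homebrew', ['--version'], None, check=True)
#   print(result.stdout.decode('utf-8'))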
def install_homebrew(homebrew_dir):
logging.info("Installing homebrew to {}".format(homebrew_dir))
if not os.path.exists(homebrew_dir):
os.makedirs(homebrew_dir)
logging.info("Downloading homebrew...")
response = requests.get(
"https://github.com/Homebrew/brew/tarball/master", stream=True
)
response.raise_for_status()
with tempfile.NamedTemporaryFile() as fout:
for chunk in response.iter_content(1024 * 1024):
fout.write(chunk)
fout.flush()
logging.info("Extracting homebrew...")
run(["tar", "xzf", fout.name, "--strip", "1", "-C", homebrew_dir])
logging.info("Extracted homebrew")
def fetch_tarball_sha256(url):
"""Get the sha256 of a tarball"""
logging.info("Fetching tarball from {}...".format(url))
response = requests.get(url, stream=True)
sha256 = hashlib.sha256()
for chunk in response.iter_content(chunk_size=1024 * 1024):
sha256.update(chunk)
hex_hash = sha256.hexdigest()
logging.info("Downloaded {} with hash {}".format(url, hex_hash))
return hex_hash
def get_formula_path(homebrew_dir, tap_repository):
"""Get the path for the buck forumula in the given repository"""
result = brew(homebrew_dir, ["formula", tap_repository + "/buck"], None, True)
return result.stdout.decode("utf-8").strip()
def setup_tap(homebrew_dir, tap_repository):
"""Make sure that `tap_repository` is tapped"""
logging.info("Tapping {}".format(tap_repository))
brew(homebrew_dir, ["tap", tap_repository])
logging.info("Tapped {}".format(tap_repository))
def update_formula_before_bottle(
repository, release_version, release_timestamp, formula_path, tarball_sha256
):
"""
Updates `formula_path` with correct urls, version and sha for building a bottle
Args:
        repository: The github repository (owner/name) whose source tarball url is written into the formula
release_version: The version of the release (no "v" prefix)
release_timestamp: The timestamp to use while building
formula_path: The local path to the buck formula
tarball_sha256: The sha256 of the source tarball for the specified release
"""
logging.info("Updating formula at {}".format(formula_path))
with open(formula_path, "r") as fin:
all_data = fin.read()
all_data = re.sub(
r"BUCK_VERSION = .*$",
'BUCK_VERSION = "{}".freeze'.format(release_version),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r"BUCK_RELEASE_TIMESTAMP = .*$",
'BUCK_RELEASE_TIMESTAMP = "{}".freeze'.format(release_timestamp),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r'sha256 "[a-z0-9]{64}"$',
'sha256 "{}"'.format(tarball_sha256),
all_data,
flags=re.MULTILINE,
)
# This is a wholly undocumented endpoint, but is not subject to ratelimiting
# See https://github.com/facebook/homebrew-fb/pull/33
all_data = re.sub(
r' url "https://.+"$',
r' url "https://github.com/{repository}/archive/v#{{BUCK_VERSION}}.tar.gz"'.format(
repository=repository
),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r' root_url "https://github.com/.*/releases/download/v#{BUCK_VERSION}"',
r' root_url "https://github.com/{repository}/releases/download/v#{{BUCK_VERSION}}"'.format(
repository=repository
),
all_data,
flags=re.MULTILINE,
)
with open(formula_path, "w") as fout:
fout.write(all_data)
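# Illustrative sketch only (the field values below are assumptions, not taken from
# a real buck.rb): after update_formula_before_bottle() runs, the rewritten Ruby
# formula fields are expected to look roughly like
#
#   BUCK_VERSION = "2019.01.10.01".freeze
#   BUCK_RELEASE_TIMESTAMP = "1547100000".freeze
#   sha256 "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
#   url "https://github.com/facebook/buck/archive/v#{BUCK_VERSION}.tar.gz"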
def build_bottle_file(
homebrew_dir,
tap_repository,
tap_path,
release_version,
target_macos_version,
output_dir,
):
"""
Builds the actual bottle file via brew
Args:
        homebrew_dir: The path containing bin/brew. e.g. /usr/local
        tap_repository: The name of the tap repository
tap_path: The local path to the given tap repository
release_version: The version that should be built (no "v" prefix)
        target_macos_version: The target macos short name to use in the resulting path
output_dir: The directory to move the build artifact to after building
Returns:
The path to the bottle.tar.gz
"""
brew_target = tap_repository + "/buck"
logging.info("Building bottle")
# Cool, so install --force will still not rebuild. Uninstall, and just don't
# care if the uninstall fails
brew(homebrew_dir, ["uninstall", "--force", brew_target], tap_path, check=False)
brew(homebrew_dir, ["install", "--force", "--build-bottle", brew_target], tap_path)
logging.info("Creating bottle file")
brew(
homebrew_dir,
["bottle", "--no-rebuild", "--skip-relocation", brew_target],
tap_path,
)
logging.info("Created bottle file")
bottle_filename = "buck-{ver}.{macos_ver}.bottle.tar.gz".format(
ver=release_version, macos_ver=target_macos_version
)
bottle_path = os.path.join(output_dir, bottle_filename)
bottles = glob.glob(
os.path.join(tap_path, "buck--{}*.bottle.tar.gz".format(release_version))
)
if len(bottles) != 1:
raise ReleaseException(
"Got an invalid number of bottle files ({} files: {})".format(
len(bottles), " ".join(bottles)
)
)
shutil.move(bottles[0], bottle_path)
return bottle_path
def get_sha256(path, chunk_size=1024 * 1024):
"""Get the sha256 of a file"""
sha = hashlib.sha256()
with open(path, "rb") as fin:
data = fin.read(chunk_size)
while data:
sha.update(data)
data = fin.read(chunk_size)
return sha.hexdigest()
def update_formula_after_bottle(formula_path, sha, target_macos_version_spec):
"""
Update the buck formula with the sha for the newly created bottle
Args:
formula_path: The path to the buck formula
sha: The new sha to use
target_macos_version_spec: The version spec to use for this sha
"""
logging.info("Updating formula with new bottle sha")
with open(formula_path, "r") as fin:
all_data = fin.read()
all_data = re.sub(
r'sha256 "[a-z0-9]+" => :.*$',
'sha256 "{}" => :{}'.format(sha, target_macos_version_spec),
all_data,
flags=re.MULTILINE,
)
with open(formula_path, "w") as fout:
fout.write(all_data)
logging.info("Updated formula with new bottle sha")
def push_tap(git_repository, tap_path, version):
"""
Grab any working directory changes for the tap, clone a new tap repository,
and push those changes upstream. The original tap path is in a clean state
after this push. The clone is done with ssh, so ssh keys must be available
Args:
git_repository: The repo on github that needs to be cloned/pushed to
tap_path: The directory that the tap (with changes) exists in
version: The version to use in commit messages
"""
logging.info("Gathering git diff from {}".format(tap_path))
git_diff = run(["git", "diff"], tap_path, True).stdout
git_url = "[email protected]:{}.git".format(git_repository)
with tempfile.TemporaryDirectory() as temp_dir:
logging.info("Cloning {} into {}".format(git_url, temp_dir))
run(["git", "clone", git_url, temp_dir])
logging.info("Cloned into {}. Applying patch".format(temp_dir))
run(["git", "apply", "-"], temp_dir, input=git_diff)
logging.info("Committing...")
with tempfile.NamedTemporaryFile() as fout:
commit_message = (
"Bump buck to version {}\n\nThis commit was generated by "
"release automation\n"
).format(version)
fout.write(commit_message.encode("utf-8"))
fout.flush()
run(["git", "commit", "-F", fout.name, "buck.rb"], temp_dir)
logging.info("Pushing commit upstream")
run(["git", "push", "origin"], temp_dir)
logging.info("Pushed commit upstream!")
logging.info("Resetting state of {}, and updating it after push".format(tap_path))
run(["git", "checkout", "buck.rb"], tap_path)
run(["git", "checkout", "master"], tap_path)
run(["git", "pull"], tap_path)
logging.info("Reset state of {}, and updating it after push".format(tap_path))
def validate_tap(homebrew_dir, tap_repository, version):
logging.info("Validating that brew installs with new tap information")
brew_target = tap_repository + "/buck"
brew(homebrew_dir, ["uninstall", "--force", brew_target])
brew(homebrew_dir, ["install", brew_target])
output = (
brew(homebrew_dir, ["info", brew_target], capture_output=True)
.stdout.decode("utf-8")
.splitlines()[0]
)
if "{}/buck: stable {}".format(tap_repository, version) not in output:
raise ReleaseException(
"Expected version {} to be installed, but got this from `brew info {}`: {}".format(
version, tap_repository, output
)
)
def audit_tap(homebrew_dir, tap_repository):
logging.info("Running brew audit")
brew_target = tap_repository + "/buck"
brew(homebrew_dir, ["audit", brew_target])
def publish_tap_changes(homebrew_dir, tap_repository, version):
git_user, git_repo = tap_repository.split("/")
full_git_repo = "{}/homebrew-{}".format(git_user, git_repo)
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
push_tap(full_git_repo, tap_path, version)
def log_about_manual_tap_push(homebrew_dir, tap_repository):
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
logging.info(
"The homebrew tap is ready for a pull request. It can be found at {}".format(
tap_path
)
)
def build_bottle(
homebrew_dir,
release,
repository,
tap_repository,
target_macos_version,
target_macos_version_spec,
output_dir,
):
release_version, release_timestamp = get_version_and_timestamp_from_release(release)
if not os.path.exists(os.path.join(homebrew_dir, "bin", "brew")):
install_homebrew(homebrew_dir)
setup_tap(homebrew_dir, tap_repository)
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
# This is a wholly undocumented endpoint, but is not subject to ratelimiting
# See https://github.com/facebook/homebrew-fb/pull/33
undocumented_tarball_url = (
"https://github.com/{repository}/archive/{tag_name}.tar.gz".format(
repository=repository, tag_name=release["tag_name"]
)
)
tarball_sha256 = fetch_tarball_sha256(undocumented_tarball_url)
# First, update the bottle to have the new version and tarball sha.
update_formula_before_bottle(
repository, release_version, release_timestamp, formula_path, tarball_sha256
)
# Build the actual bottle file
bottle_path = build_bottle_file(
homebrew_dir,
tap_repository,
tap_path,
release_version,
target_macos_version,
output_dir,
)
# Get the bottle file sha, and update the bottle formula
bottle_sha = get_sha256(bottle_path)
update_formula_after_bottle(formula_path, bottle_sha, target_macos_version_spec)
# Make sure that we still pass `brew audit`
audit_tap(homebrew_dir, tap_repository)
return bottle_path
| {
"content_hash": "777b03c22a90a51c2bb9a95d63819002",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 106,
"avg_line_length": 35.29545454545455,
"alnum_prop": 0.6256439150032196,
"repo_name": "JoelMarcey/buck",
"id": "cd67b358dc96031078dd16dd58aa7ce7a1e33c9c",
"size": "13022",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tools/release/platforms/homebrew.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
import click
from arrow.cli import pass_context
from arrow.decorators import custom_exception, dict_output
@click.command('updateValue')
@click.argument("id_number")
@click.argument("new_value")
@click.option(
"--metadata",
help=""
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, id_number, new_value, metadata=""):
"""TODO: Undocumented
Output:
???
"""
return ctx.gi.cannedvalues.updateValue(id_number, new_value, metadata=metadata)
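# Hedged usage sketch (the exact executable name and group layout are assumptions
# inferred from this file's path): the command is typically invoked from a shell as
#
#   arrow cannedvalues updateValue <id_number> <new_value> --metadata '{"key": "val"}'
#
# and prints the dict returned by the Apollo server.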
| {
"content_hash": "fd7a3235005cbd17c2c4412201f615a5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 20.608695652173914,
"alnum_prop": 0.7067510548523207,
"repo_name": "erasche/python-apollo",
"id": "f876da0b95d2aa1037ec8c6a3046600d4444c56b",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arrow/commands/cannedvalues/updateValue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18804"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.nddata import NDData, NDSlicingMixin
from astropy.nddata.nduncertainty import NDUncertainty, StdDevUncertainty
from astropy import units as u
# Just add the Mixin to NDData
# TODO: Make this use NDDataRef instead!
class NDDataSliceable(NDSlicingMixin, NDData):
pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
class SomeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return 'fake'
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
def test_slicing_only_data():
data = np.arange(10)
nd = NDDataSliceable(data)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
def test_slicing_data_scalar_fail():
data = np.array(10)
nd = NDDataSliceable(data)
with pytest.raises(TypeError): # as exc
nd[:]
# assert exc.value.args[0] == 'Scalars cannot be sliced.'
def test_slicing_1ddata_ndslice():
data = np.array([10, 20])
nd = NDDataSliceable(data)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
@pytest.mark.parametrize('prop_name', ['mask', 'wcs', 'uncertainty'])
def test_slicing_1dmask_ndslice(prop_name):
    # Data is 2d but the mask is only 1d, so the IndexError raised when
    # slicing the mask should propagate to the user.
data = np.ones((3, 3))
kwarg = {prop_name: np.ones(3)}
nd = NDDataSliceable(data, **kwarg)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
def test_slicing_all_npndarray_1d():
data = np.arange(10)
mask = data > 3
uncertainty = StdDevUncertainty(np.linspace(10, 20, 10))
wcs = np.linspace(1, 1000, 10)
# Just to have them too
unit = u.s
meta = {'observer': 'Brian'}
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs,
unit=unit, meta=meta)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5].array, nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
assert unit is nd2.unit
    assert meta == nd2.meta
def test_slicing_all_npndarray_nd():
# See what happens for multidimensional properties
data = np.arange(1000).reshape(10, 10, 10)
mask = data > 3
uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
wcs = np.linspace(1, 1000, 1000).reshape(10, 10, 10)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
# Slice only 1D
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
# Slice 3D
nd2 = nd[2:5, :, 4:7]
assert_array_equal(data[2:5, :, 4:7], nd2.data)
assert_array_equal(mask[2:5, :, 4:7], nd2.mask)
assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array)
assert_array_equal(wcs[2:5, :, 4:7], nd2.wcs)
def test_slicing_all_npndarray_shape_diff():
data = np.arange(10)
mask = (data > 3)[0:9]
uncertainty = np.linspace(10, 20, 15)
wcs = np.linspace(1, 1000, 12)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
    # All are sliced even if the shapes differ (no info message is raised)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
def test_slicing_all_something_wrong():
data = np.arange(10)
mask = [False]*10
uncertainty = {'rdnoise': 2.9, 'gain': 1.4}
wcs = 145 * u.degree
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
# Sliced properties:
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
    # Attributes that could not be sliced are passed through unchanged (an info message is logged)
uncertainty is nd2.uncertainty
assert_array_equal(wcs, nd2.wcs)
def test_boolean_slicing():
data = np.arange(10)
mask = data.copy()
uncertainty = StdDevUncertainty(data.copy())
wcs = data.copy()
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
assert_array_equal(data[3:8], nd2.data)
assert_array_equal(mask[3:8], nd2.mask)
assert_array_equal(wcs[3:8], nd2.wcs)
assert_array_equal(uncertainty.array[3:8], nd2.uncertainty.array)
| {
"content_hash": "7d6d99c2d1e80d5a8cd7bdc9220453ef",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 75,
"avg_line_length": 31.128205128205128,
"alnum_prop": 0.6583607907742999,
"repo_name": "bsipocz/astropy",
"id": "7fc29b701e68f4fec841af178e0a69c6a8ec8abf",
"size": "4922",
"binary": false,
"copies": "1",
"ref": "refs/heads/hacking",
"path": "astropy/nddata/mixins/tests/test_ndslicing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "442627"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9395160"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0084_auto_20170112_1614'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='funds_from',
field=models.CharField(choices=[('C', 'Continuing (claimantship)'), ('I', 'Core (Software Sustainability Institute)'), ('F', 'Grant (inauguration claimantship)')], default='G', max_length=1),
),
]
| {
"content_hash": "22decfaef64bf7519708b4728b96b4c6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 203,
"avg_line_length": 29.72222222222222,
"alnum_prop": 0.6149532710280374,
"repo_name": "softwaresaved/fat",
"id": "d4fe495523043b080dd042fcbffad3015b493cfa",
"size": "608",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lowfat/migrations/0085_auto_20170125_1107.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3202"
},
{
"name": "HTML",
"bytes": "38552"
},
{
"name": "JavaScript",
"bytes": "653"
},
{
"name": "Python",
"bytes": "235043"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
__author__ = 'Joe Linn'
from .abstract import AbstractQuery
class Term(AbstractQuery):
def __init__(self, term=None):
"""
@param term: optional
@type term: dict
"""
super(Term, self).__init__()
if term is not None:
self.set_raw_term(term)
def set_raw_term(self, term):
"""
Set term.
@param term:
@type term: dict
@return:
@rtype: self
"""
self.params = term
return self
def set_term(self, key, value, boost=1.0):
"""
Add a term to the query
@param key: key to query
@type key: str
@param value: value(s) for the query
@type value: str or list
@param boost:
@type boost: float
@return:
@rtype: self
"""
return self.set_raw_term({key: {'value': value, 'boost': boost}})
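# Minimal usage sketch (not part of the original module; the field and value are
# made up): build a single-field term query and inspect the generated params.
def _example_term_query():
    query = Term().set_term("user", "kimchy", boost=2.0)
    # query.params is now {'user': {'value': 'kimchy', 'boost': 2.0}}
    return query.params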
| {
"content_hash": "7d71c5903b32cee315d1cf83ee89d2a4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 23.41025641025641,
"alnum_prop": 0.49726177437020813,
"repo_name": "jlinn/pylastica",
"id": "981f011a8a1c91dbe612262a0f4c0da84252d312",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylastica/query/term.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "547260"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
"""
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse.linalg
import scipy.linalg
import contextlib
from smt.utils.options_dictionary import OptionsDictionary
VALID_SOLVERS = (
"krylov-dense",
"dense-lu",
"dense-chol",
"lu",
"ilu",
"krylov",
"krylov-lu",
"krylov-mg",
"gs",
"jacobi",
"mg",
"null",
)
def get_solver(solver):
if solver == "dense-lu":
return DenseLUSolver()
elif solver == "dense-chol":
return DenseCholeskySolver()
elif solver == "krylov-dense":
return KrylovSolver(pc="dense")
elif solver == "lu" or solver == "ilu":
return DirectSolver(alg=solver)
elif solver == "krylov":
return KrylovSolver()
elif solver == "krylov-lu":
return KrylovSolver(pc="lu")
elif solver == "krylov-mg":
return KrylovSolver(pc="mg")
elif solver == "gs" or solver == "jacobi":
return StationarySolver(solver=solver)
elif solver == "mg":
return MultigridSolver()
elif isinstance(solver, LinearSolver):
return solver
elif solver == "null":
return NullSolver()
    elif solver is None:
return None
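# Hedged usage sketch (not part of the original module): get_solver() maps a name
# from VALID_SOLVERS to a configured LinearSolver and passes existing instances
# through unchanged, e.g.
#
#   solver = get_solver("krylov-lu")      # KrylovSolver preconditioned with an LU factorization
#   solver = get_solver(DenseLUSolver())  # returned as-is
#
# Unrecognised names currently fall through and yield None.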
class Callback(object):
def __init__(self, size, string, interval, printer):
self.size = size
self.string = string
self.interval = interval
self.printer = printer
self.counter = 0
self.ind_y = 0
self.mtx = None
self.rhs = None
self.norm0 = 1.0
def _print_norm(self, norm):
if self.counter == 0:
self.norm0 = norm
if self.counter % self.interval == 0:
self.printer(
"%s (%i x %i mtx), output %-3i : %3i %15.9e %15.9e"
% (
self.string,
self.size,
self.size,
self.ind_y,
self.counter,
norm,
norm / self.norm0,
)
)
self.counter += 1
def _print_res(self, res):
self._print_norm(res)
def _print_sol(self, sol):
res = self.mtx.dot(sol) - self.rhs
norm = np.linalg.norm(res)
self._print_norm(norm)
class LinearSolver(object):
def __init__(self, **kwargs):
self.mtx = None
self.rhs = None
self.options = OptionsDictionary()
self.options.declare("print_init", True, types=bool)
self.options.declare("print_solve", True, types=bool)
self._initialize()
self.options.update(kwargs)
def _initialize(self):
pass
def _setup(self, mtx, printer, mg_matrices=[]):
pass
def _solve(self, rhs, sol=None, ind_y=0):
pass
def _clone(self):
clone = self.__class__()
        clone.options.update(self.options._dict)
return clone
@contextlib.contextmanager
def _active(self, active):
orig_active = self.printer.active
self.printer.active = self.printer.active and active
yield self.printer
self.printer.active = orig_active
class NullSolver(LinearSolver):
def solve(self, rhs, sol=None, ind_y=0):
pass
class DenseCholeskySolver(LinearSolver):
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx.A
assert isinstance(self.mtx, np.ndarray), "mtx is of type %s" % type(mtx)
with printer._timed_context(
"Performing Chol. fact. (%i x %i mtx)" % mtx.shape
):
self.upper = scipy.linalg.cholesky(self.mtx)
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
sol[:] = rhs
scipy.linalg.solve_triangular(
self.upper, sol, overwrite_b=True, trans="T"
)
scipy.linalg.solve_triangular(self.upper, sol, overwrite_b=True)
return sol
class DenseLUSolver(LinearSolver):
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx
assert isinstance(mtx, np.ndarray), "mtx is of type %s" % type(mtx)
with printer._timed_context(
"Performing LU fact. (%i x %i mtx)" % mtx.shape
):
self.fact = scipy.linalg.lu_factor(mtx)
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
sol[:] = scipy.linalg.lu_solve(self.fact, rhs)
return sol
class DirectSolver(LinearSolver):
def _initialize(self):
self.options.declare("alg", "lu", values=["lu", "ilu"])
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx
assert isinstance(mtx, scipy.sparse.spmatrix), "mtx is of type %s" % type(
mtx
)
with printer._timed_context(
"Performing %s fact. (%i x %i mtx)"
% ((self.options["alg"],) + mtx.shape)
):
if self.options["alg"] == "lu":
self.fact = scipy.sparse.linalg.splu(mtx)
elif self.options["alg"] == "ilu":
self.fact = scipy.sparse.linalg.spilu(
mtx, drop_rule="interp", drop_tol=1e-3, fill_factor=2
)
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
sol[:] = self.fact.solve(rhs)
return sol
class KrylovSolver(LinearSolver):
def _initialize(self):
self.options.declare("interval", 10, types=int)
self.options.declare("solver", "cg", values=["cg", "bicgstab", "gmres"])
self.options.declare(
"pc",
None,
values=[None, "ilu", "lu", "gs", "jacobi", "mg", "dense"],
types=LinearSolver,
)
self.options.declare("ilimit", 100, types=int)
self.options.declare("atol", 1e-15, types=(int, float))
self.options.declare("rtol", 1e-15, types=(int, float))
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx
pc_solver = get_solver(self.options["pc"])
if pc_solver is not None:
pc_solver._setup(mtx, printer, mg_matrices=mg_matrices)
self.pc_solver = pc_solver
self.pc_op = scipy.sparse.linalg.LinearOperator(
mtx.shape, matvec=pc_solver._solve
)
else:
self.pc_solver = None
self.pc_op = None
self.callback = Callback(
mtx.shape[0], "Krylov solver", self.options["interval"], printer
)
if self.options["solver"] == "cg":
self.solver = scipy.sparse.linalg.cg
self.callback_func = self.callback._print_sol
self.solver_kwargs = {
"atol": "legacy",
"tol": self.options["atol"],
"maxiter": self.options["ilimit"],
}
elif self.options["solver"] == "bicgstab":
self.solver = scipy.sparse.linalg.bicgstab
self.callback_func = self.callback._print_sol
self.solver_kwargs = {
"tol": self.options["atol"],
"maxiter": self.options["ilimit"],
}
elif self.options["solver"] == "gmres":
self.solver = scipy.sparse.linalg.gmres
self.callback_func = self.callback._print_res
self.solver_kwargs = {
"tol": self.options["atol"],
"maxiter": self.options["ilimit"],
"restart": min(self.options["ilimit"], mtx.shape[0]),
}
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
with printer._timed_context(
"Running %s Krylov solver (%i x %i mtx)"
% ((self.options["solver"],) + self.mtx.shape)
):
self.callback.counter = 0
self.callback.ind_y = ind_y
self.callback.mtx = self.mtx
self.callback.rhs = rhs
self.callback._print_sol(sol)
tmp, info = self.solver(
self.mtx,
rhs,
x0=sol,
M=self.pc_op,
callback=self.callback_func,
**self.solver_kwargs,
)
sol[:] = tmp
return sol
class StationarySolver(LinearSolver):
def _initialize(self):
self.options.declare("interval", 10, types=int)
self.options.declare("solver", "gs", values=["gs", "jacobi"])
self.options.declare("damping", 1.0, types=(int, float))
self.options.declare("ilimit", 10, types=int)
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx
self.callback = Callback(
mtx.shape[0], "Stationary solver", self.options["interval"], printer
)
with printer._timed_context(
"Initializing %s solver (%i x %i mtx)"
% ((self.options["solver"],) + self.mtx.shape)
):
if self.options["solver"] == "jacobi":
# A x = b
# x_{k+1} = x_k + w D^{-1} (b - A x_k)
self.d_inv = self.options["damping"] / self._split_mtx_diag()
self.iterate = self._jacobi
elif self.options["solver"] == "gs":
# A x = b
# x_{k+1} = x_k + (1/w D + L)^{-1} (b - A x_k)
mtx_d = self._split_mtx("diag")
mtx_l = self._split_mtx("lower")
mtx_ldw = mtx_l + mtx_d / self.options["damping"]
self.inv = scipy.sparse.linalg.splu(mtx_ldw)
self.iterate = self._gs
def _split_mtx_diag(self):
shape = self.mtx.shape
rows, cols, data = scipy.sparse.find(self.mtx)
mask_d = rows == cols
diag = np.zeros(shape[0])
np.add.at(diag, rows[mask_d], data[mask_d])
return diag
def _split_mtx(self, part):
shape = self.mtx.shape
rows, cols, data = scipy.sparse.find(self.mtx)
if part == "diag":
mask = rows == cols
elif part == "lower":
mask = rows > cols
elif part == "upper":
mask = rows < cols
return scipy.sparse.csc_matrix(
(data[mask], (rows[mask], cols[mask])), shape=shape
)
def _jacobi(self, rhs, sol):
# A x = b
# x_{k+1} = x_k + w D^{-1} (b - A x_k)
sol += self.d_inv * (rhs - self.mtx.dot(sol))
def _gs(self, rhs, sol):
# A x = b
# x_{k+1} = x_k + (1/w D + L)^{-1} (b - A x_k)
sol += self.inv.solve(rhs - self.mtx.dot(sol))
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
self.callback.counter = 0
self.callback.ind_y = ind_y
self.callback.mtx = self.mtx
self.callback.rhs = rhs
with printer._timed_context(
"Running %s stationary solver (%i x %i mtx)"
% ((self.options["solver"],) + self.mtx.shape)
):
for ind in range(self.options["ilimit"]):
self.iterate(rhs, sol)
self.callback._print_sol(sol)
return sol
class MultigridSolver(LinearSolver):
def _initialize(self):
self.options.declare("interval", 1, types=int)
self.options.declare("mg_cycles", 0, types=int)
self.options.declare(
"solver",
"null",
values=["null", "gs", "jacobi", "krylov"],
types=LinearSolver,
)
def _setup(self, mtx, printer, mg_matrices=[]):
self.printer = printer
with self._active(self.options["print_init"]) as printer:
self.mtx = mtx
solver = get_solver(self.options["solver"])
mg_solver = solver._clone()
mg_solver._setup(mtx, printer)
self.mg_mtx = [mtx]
self.mg_sol = [np.zeros(self.mtx.shape[0])]
self.mg_rhs = [np.zeros(self.mtx.shape[0])]
self.mg_ops = []
self.mg_solvers = [mg_solver]
for ind, mg_op in enumerate(mg_matrices):
mg_mtx = mg_op.T.dot(self.mg_mtx[-1]).dot(mg_op).tocsc()
mg_sol = mg_op.T.dot(self.mg_sol[-1])
mg_rhs = mg_op.T.dot(self.mg_rhs[-1])
mg_solver = solver._clone()
mg_solver._setup(mg_mtx, printer)
self.mg_mtx.append(mg_mtx)
self.mg_sol.append(mg_sol)
self.mg_rhs.append(mg_rhs)
self.mg_ops.append(mg_op)
self.mg_solvers.append(mg_solver)
mg_mtx = self.mg_mtx[-1]
mg_solver = DirectSolver()
mg_solver._setup(mg_mtx, printer)
self.mg_solvers[-1] = mg_solver
self.callback = Callback(
mtx.shape[0], "Multigrid solver", self.options["interval"], printer
)
def _restrict(self, ind_level):
mg_op = self.mg_ops[ind_level]
mtx = self.mg_mtx[ind_level]
sol = self.mg_sol[ind_level]
rhs = self.mg_rhs[ind_level]
res = rhs - mtx.dot(sol)
res_coarse = mg_op.T.dot(res)
self.mg_rhs[ind_level + 1][:] = res_coarse
def _smooth_and_restrict(self, ind_level, ind_cycle, ind_y):
mg_op = self.mg_ops[ind_level]
mtx = self.mg_mtx[ind_level]
sol = self.mg_sol[ind_level]
rhs = self.mg_rhs[ind_level]
solver = self.mg_solvers[ind_level]
solver.print_info = "MG iter %i level %i" % (ind_cycle, ind_level)
solver._solve(rhs, sol, ind_y)
res = rhs - mtx.dot(sol)
res_coarse = mg_op.T.dot(res)
self.mg_rhs[ind_level + 1][:] = res_coarse
def _coarse_solve(self, ind_cycle, ind_y):
sol = self.mg_sol[-1]
rhs = self.mg_rhs[-1]
solver = self.mg_solvers[-1]
solver.print_info = "MG iter %i level %i" % (ind_cycle, len(self.mg_ops))
solver._solve(rhs, sol, ind_y)
def _smooth_and_interpolate(self, ind_level, ind_cycle, ind_y):
mg_op = self.mg_ops[ind_level]
mtx = self.mg_mtx[ind_level]
sol = self.mg_sol[ind_level]
rhs = self.mg_rhs[ind_level]
solver = self.mg_solvers[ind_level]
solver.print_info = "MG iter %i level %i" % (ind_cycle, ind_level)
sol_coarse = self.mg_sol[ind_level + 1]
sol += mg_op.dot(sol_coarse)
solver._solve(rhs, sol, ind_y)
def _solve(self, rhs, sol=None, ind_y=0):
with self._active(self.options["print_solve"]) as printer:
self.rhs = rhs
if sol is None:
sol = np.array(rhs)
orig_sol = sol
self.callback.counter = 0
self.callback.ind_y = ind_y
self.callback.mtx = self.mtx
self.callback.rhs = rhs
self.mg_rhs[0][:] = rhs
for ind_level in range(len(self.mg_ops)):
self._restrict(ind_level)
self._coarse_solve(-1, ind_y)
for ind_level in range(len(self.mg_ops) - 1, -1, -1):
self._smooth_and_interpolate(ind_level, -1, ind_y)
for ind_cycle in range(self.options["mg_cycles"]):
for ind_level in range(len(self.mg_ops)):
self._smooth_and_restrict(ind_level, ind_cycle, ind_y)
self._coarse_solve(ind_cycle, ind_y)
for ind_level in range(len(self.mg_ops) - 1, -1, -1):
self._smooth_and_interpolate(ind_level, ind_cycle, ind_y)
orig_sol[:] = self.mg_sol[0]
return orig_sol
| {
"content_hash": "1d82139f9b91d5cb6d061bd799f011fd",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 87,
"avg_line_length": 32.497206703910614,
"alnum_prop": 0.5104578534181422,
"repo_name": "SMTorg/smt",
"id": "46fe1af79f0216d9f4d595e6721aa8f685295a08",
"size": "17451",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smt/utils/linear_solvers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "22649"
},
{
"name": "Cython",
"bytes": "5481"
},
{
"name": "Jupyter Notebook",
"bytes": "14431913"
},
{
"name": "Python",
"bytes": "799500"
}
],
"symlink_target": ""
} |
"""
View class of the website
~~~~~~~~~~~~~~~~~~~~~~~~~
The website respects the MVC design pattern and this class is the view.
"""
import os
import cherrypy
from Cheetah.Template import Template
import cgi
import csv
import StringIO
from csv import DictReader
import urllib2
import json
from cherrypy import _cperror
import master_controler
from pubsublogger import publisher
def merge_csvs(asns):
url = 'http://{host}:{port}/csv/'.format(
host = cherrypy.config.get('server.socket_host'),
port = cherrypy.config.get('server.socket_port'))
asns = json.loads(asns)
if asns[0] == 0:
return json.dumps('')
temp_dict = {}
no_entries = []
for asn in asns:
try:
f = urllib2.urlopen(url + asn)
for line in DictReader(f):
date = line['day']
rank = line['rank']
if temp_dict.get(date) is None:
temp_dict[date] = {}
temp_dict[date][asn] = rank
        except Exception:
            no_entries.append(asn)
to_return = 'date,' + ','.join(asns) + '\n'
for date, entries in temp_dict.iteritems():
to_return += date
for asn in asns:
rank = entries.get(asn)
if rank is None:
rank = 0
to_return += ',' + str(rank)
to_return += '\n'
return json.dumps(to_return)
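# Hedged behaviour sketch (the ASNs, date and ranks below are made-up values):
# for asns = '["3215", "5577"]' the function fetches /csv/3215 and /csv/5577 from
# the local CSV endpoint and returns a JSON-encoded merged CSV such as
#
#   date,3215,5577
#   2013-06-01,0.00042,0
#
# where a missing (date, asn) pair is filled with rank 0.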
class Master(object):
def __init__(self):
self.dir_templates = 'templates'
publisher.channel = 'Website'
def __none_if_empty(self, to_check = None):
"""
Ensure the empty paramaters are None before doing anything
"""
if to_check is None or len(to_check) == 0:
return None
return cgi.escape(to_check, True)
def __init_template(self, template_name, source = None, date = None):
"""
Initialize the basic components of the template
"""
template = Template(file = os.path.join(self.dir_templates,
template_name + '.tmpl'))
source = self.__none_if_empty(source)
date = self.__none_if_empty(date)
template.css_file = 'http://www.circl.lu/css/styles.css'
template.logo = 'http://www.circl.lu/pics/logo.png'
template.banner = 'http://www.circl.lu/pics/topbanner.jpg'
template.sources = master_controler.get_sources(date)
template.dates = master_controler.get_dates()
template.source = source
template.date = date
return template
def __csv2string(self, data):
        si = StringIO.StringIO()
        cw = csv.writer(si)
        cw.writerow(data)
        return si.getvalue().strip('\r\n')
def __query_logging(self, ip, user_agent, webpage, date=None, source=None,
asn=None, asn_details=None, compared_asns=None, ip_lookup=None):
publisher.info(self.__csv2string([ip, user_agent, webpage, date,
source, asn, asn_details, compared_asns, ip_lookup]))
@cherrypy.expose
def default(self):
"""
Load the index
"""
return str(self.index())
@cherrypy.expose
def index(self, source = None, date = None):
"""
Generate the view of the global ranking
"""
source = self.__none_if_empty(source)
date = self.__none_if_empty(date)
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='index', date=date, source=source)
histo, list_size = master_controler.prepare_index(source, date)
template = self.__init_template('index_asn', source, date)
template.list_size = list_size
template.histories = histo
return str(template)
@cherrypy.expose
def asn_details(self, source = None, asn = None, ip_details = None, date = None):
"""
Generate the view of an ASN
"""
asn = self.__none_if_empty(asn)
source = self.__none_if_empty(source)
date = self.__none_if_empty(date)
if asn is None:
return self.index(source, date)
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='asn_details', date=date, source=source, asn=asn,
asn_details = ip_details)
ip_details = self.__none_if_empty(ip_details)
template = self.__init_template('asn_details', source, date)
asn = asn.lstrip('AS')
if asn.isdigit():
template.asn = asn
asn_description, position, as_infos = master_controler.get_as_infos(asn,
date, source)
if as_infos is not None and len(as_infos) > 0:
template.asn_description = asn_description
template.asn_descs = as_infos
template.current_sources = master_controler.get_last_seen_sources(asn)
template.desc_history = master_controler.get_asn_descriptions(asn)
template.position = position[0]
template.size = position[1]
if len(template.current_sources.keys()) > 0:
template.sources = template.current_sources.keys()
if ip_details is not None:
template.ip_details = ip_details
template.ip_descs = master_controler.get_ip_info(asn,
ip_details, date, source)
else:
template.error = "Invalid query: " + asn
return str(template)
@cherrypy.expose
def comparator(self, asns = None):
"""
Generate the view comparing a set of ASNs
"""
asns = self.__none_if_empty(asns)
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='comparator', compared_asns=asns)
template = self.__init_template('comparator')
template.asns = asns
if asns is not None:
asns_list, details_list = master_controler.get_comparator_metatada(asns)
template.asns_json = json.dumps(asns_list)
template.asns_details = details_list
else:
template.asns_json = json.dumps([0])
template.asns_details = None
return str(template)
@cherrypy.expose
def trend(self):
"""
Print the trend World vs Luxembourg
"""
return self.trend_benelux()
#self.__query_logging(cherrypy.request.remote.ip,
# cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
# webpage='trend')
#return str(self.__init_template('trend'))
@cherrypy.expose
def trend_benelux(self):
"""
Print the trend of the benelux countries
"""
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='trend_benelux')
return str(self.__init_template('trend_benelux'))
@cherrypy.expose
def map(self):
"""
Print the worldmap
"""
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='map')
return str(self.__init_template('map'))
@cherrypy.expose
def ip_lookup(self, ip = None):
ip = self.__none_if_empty(ip)
self.__query_logging(cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', 'Empty User-Agent'),
webpage='ip_lookup', ip_lookup=ip)
template = self.__init_template('ip_lookup')
template.ip = ip
result = master_controler.get_ip_lookup(ip)
if result is not None:
template.history = result[0]
template.ptrrecord = result[1]
else:
template.history = None
template.ptrrecord = None
return str(template)
def error_page_404(status, message, traceback, version):
"""
Display an error if the page does not exists
"""
return "Error %s - This page does not exist." % status
def handle_error():
cherrypy.response.status = 500
cherrypy.response.body = ["<html><body>Sorry, an error occured</body></html>"]
publisher.error('Request: '+ str(cherrypy.request.params) + '\n' +_cperror.format_exc())
if __name__ == "__main__":
website = Master()
cherrypy.config.update({'error_page.404': error_page_404})
cherrypy.config.update({'request.error_response': handle_error})
cherrypy.quickstart(website, config = 'config/web_bgp-ranking.ini')
| {
"content_hash": "6ed6a276c29d9f58103ad33c77ff2900",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 92,
"avg_line_length": 36.252066115702476,
"alnum_prop": 0.5768836201983358,
"repo_name": "CIRCL/bgpranking-redis-api",
"id": "8c80e078fe51caa087686b1f335f9aeed723f297",
"size": "8797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/website/master.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1724"
},
{
"name": "Makefile",
"bytes": "5625"
},
{
"name": "Python",
"bytes": "86150"
},
{
"name": "Shell",
"bytes": "2574"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
from campfin.seeder import *
Seeder().seed()
| {
"content_hash": "8d147dd41d14664badf1a754d7d496c4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 20,
"alnum_prop": 0.7875,
"repo_name": "huffpostdata/campfin-linker",
"id": "05fcaeaee1a96af90bace71672472ac16f1b90b6",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seed.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "34469"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import pdfrw
import tempfile
from spreadflow_delta.proc import ExtractorBase, util
class LoadPdfPages(ExtractorBase):
def __init__(self, key='path', slicekey=None, destkey='content'):
self.key = key
self.destkey = destkey
self.slicekey = slicekey
def extract(self, key, doc):
path_or_reader = doc[self.key]
if hasattr(path_or_reader, 'pages'):
reader = path_or_reader
else:
reader = pdfrw.PdfReader(path_or_reader)
if self.slicekey is not None:
slc = doc[self.slicekey]
doc[self.destkey] = tuple(reader.pages[slc.start:slc.stop:slc.step])
else:
doc[self.destkey] = tuple(reader.pages)
class SavePdfPages(ExtractorBase):
def __init__(self, key='content', destkey='savepath', clear=False, version='1.3', compress=False):
self.key = key
self.destkey = destkey
self.clear = clear
self.version = str(version)
self.compress = compress
def extract(self, key, doc):
path = doc[self.destkey]
tmpdir = os.path.dirname(path)
writer = pdfrw.PdfWriter(version=self.version, compress=self.compress)
for page in doc[self.key]:
writer.addpage(page)
with util.open_replace(path) as stream:
writer.write(stream)
if self.clear:
del doc[self.key]
class MergePdfPage(ExtractorBase):
def __init__(self, key='content', destkey='content'):
self.key = key
self.destkey = destkey
def extract(self, key, doc):
result = pdfrw.PageMerge()
for page in doc[self.key]:
result.add(page)
doc[self.destkey] = (result.render(),)
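# Hedged usage sketch (not part of the original module; the document dict and the
# file paths are assumptions): the three processors are typically chained on one
# document, loading pages, flattening them onto a single page and saving the result.
def _example_pdf_pipeline(in_path="/tmp/in.pdf", out_path="/tmp/out.pdf"):
    doc = {"path": in_path, "savepath": out_path}
    LoadPdfPages(key="path", destkey="content").extract(None, doc)
    MergePdfPage(key="content", destkey="content").extract(None, doc)
    SavePdfPages(key="content", destkey="savepath").extract(None, doc)
    return doc["savepath"]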
| {
"content_hash": "a26c0efbdcb8d520acd8fd1468ee456e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 29.396825396825395,
"alnum_prop": 0.6123110151187905,
"repo_name": "znerol/spreadflow-pdf",
"id": "cecdef057a0d98277d992d57607cdb83ce47f399",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spreadflow_pdf/proc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10522"
}
],
"symlink_target": ""
} |
from robofab.pens.pointPen import BasePointToSegmentPen
from ufoLib.pointPen import AbstractPointPen
"""
Printing pens print their data. Useful for demos and debugging.
"""
__all__ = ["PrintingPointPen", "PrintingSegmentPen", "SegmentPrintingPointPen"]
class PrintingPointPen(AbstractPointPen):
"""A PointPen that prints every step.
"""
def __init__(self):
self.havePath = False
def beginPath(self):
self.havePath = True
print "pen.beginPath()"
def endPath(self):
self.havePath = False
print "pen.endPath()"
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
assert self.havePath
args = ["(%s, %s)" % (pt[0], pt[1])]
if segmentType is not None:
args.append("segmentType=%r" % segmentType)
if smooth:
args.append("smooth=True")
if name is not None:
args.append("name=%r" % name)
if kwargs:
args.append("**%s" % kwargs)
print "pen.addPoint(%s)" % ", ".join(args)
def addComponent(self, baseGlyphName, transformation):
assert not self.havePath
print "pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation))
from fontTools.pens.basePen import AbstractPen
class PrintingSegmentPen(AbstractPen):
"""A SegmentPen that prints every step.
"""
def moveTo(self, pt):
print "pen.moveTo(%s)" % (pt,)
def lineTo(self, pt):
print "pen.lineTo(%s)" % (pt,)
def curveTo(self, *pts):
print "pen.curveTo%s" % (pts,)
def qCurveTo(self, *pts):
print "pen.qCurveTo%s" % (pts,)
def closePath(self):
print "pen.closePath()"
def endPath(self):
print "pen.endPath()"
def addComponent(self, baseGlyphName, transformation):
print "pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation))
class SegmentPrintingPointPen(BasePointToSegmentPen):
"""A SegmentPen that pprints every step.
"""
def _flushContour(self, segments):
from pprint import pprint
pprint(segments)
if __name__ == "__main__":
p = SegmentPrintingPointPen()
from robofab.test.test_pens import TestShapes
TestShapes.onCurveLessQuadShape(p)
| {
"content_hash": "e79fd2bfd42a4b8ca67b3554da023281",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 24.178571428571427,
"alnum_prop": 0.6976858690300345,
"repo_name": "metapolator/mutatormathtools",
"id": "ead2f86643bf23790c00acc2ac1ac68de3710dab",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_modules/lib/python/robofab/pens/printingPens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "3108"
},
{
"name": "Python",
"bytes": "2101371"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
} |
import pytest
import time
import unittest.mock
from girder import events
class EventsHelper:
def __init__(self):
self.ctr = 0
self.responses = None
def _raiseException(self, event):
raise Exception('Failure condition')
def _increment(self, event):
self.ctr += event.info['amount']
def _incrementWithResponse(self, event):
self._increment(event)
event.addResponse('foo')
def _eatEvent(self, event):
event.addResponse({'foo': 'bar'})
event.stopPropagation()
event.preventDefault()
def _shouldNotBeCalled(self, event):
pytest.fail('This should not be called due to stopPropagation().')
@pytest.fixture
def eventsHelper():
yield EventsHelper()
def testSynchronousEvents(eventsHelper):
name, failname = '_test.event', '_test.failure'
handlerName = '_test.handler'
with events.bound(name, handlerName, eventsHelper._increment), \
events.bound(failname, handlerName, eventsHelper._raiseException):
# Make sure our exception propagates out of the handler
with pytest.raises(Exception, match='^Failure condition$'):
events.trigger(failname)
# Bind an event to increment the counter
assert eventsHelper.ctr == 0
event = events.trigger(name, {'amount': 2})
assert eventsHelper.ctr == 2
assert event.propagate
assert not event.defaultPrevented
assert event.responses == []
# The event should still be bound here if another handler unbinds
events.unbind(name, 'not the handler name')
events.trigger(name, {'amount': 2})
assert eventsHelper.ctr == 4
# Actually unbind the event, by going out of scope of "bound"
events.trigger(name, {'amount': 2})
assert eventsHelper.ctr == 4
# Bind an event that prevents the default action and passes a response
with events.bound(name, handlerName, eventsHelper._eatEvent), \
events.bound(name, 'other handler name',
eventsHelper._shouldNotBeCalled):
event = events.trigger(name)
assert event.defaultPrevented
assert not event.propagate
assert event.responses == [{'foo': 'bar'}]
# Test that the context manager unbinds after an unhandled exception
try:
with events.bound(failname, handlerName, eventsHelper._raiseException):
events.trigger(failname)
except Exception:
# The event should should be unbound at this point
events.trigger(failname)
@unittest.mock.patch.object(events, 'daemon', new=events.AsyncEventsThread())
def testAsyncEvents(eventsHelper):
name, failname = '_test.event', '_test.failure'
handlerName = '_test.handler'
def callback(event):
eventsHelper.ctr += 1
eventsHelper.responses = event.responses
with events.bound(failname, handlerName, eventsHelper._raiseException), \
events.bound(name, handlerName, eventsHelper._incrementWithResponse):
# Make sure an async handler that fails does not break the event
# loop and that its callback is not triggered.
assert events.daemon.eventQueue.qsize() == 0
events.daemon.trigger(failname, handlerName, callback)
# Triggering the event before the daemon starts should do nothing
assert events.daemon.eventQueue.qsize() == 1
events.daemon.trigger(name, {'amount': 2}, callback)
assert events.daemon.eventQueue.qsize() == 2
assert eventsHelper.ctr == 0
# Now run the asynchronous event handler, which should eventually
# cause our counter to be incremented.
events.daemon.start()
# Ensure that all of our events have been started within a
# reasonable amount of time. Also check the results in the loop,
# since the qsize only indicates if all events were started, not
# finished.
startTime = time.time()
while True:
if events.daemon.eventQueue.qsize() == 0:
if eventsHelper.ctr == 3:
break
if time.time() - startTime > 15:
break
time.sleep(0.1)
assert events.daemon.eventQueue.qsize() == 0
assert eventsHelper.ctr == 3
assert eventsHelper.responses == ['foo']
events.daemon.stop()
@unittest.mock.patch.object(events, 'daemon', new=events.ForegroundEventsDaemon())
def testForegroundDaemon(eventsHelper):
assert isinstance(events.daemon, events.ForegroundEventsDaemon)
# Should still be able to call start
events.daemon.start()
def callback(event):
eventsHelper.ctr += 1
eventsHelper.responses = event.responses
with events.bound('_test.event', '_test.handler', eventsHelper._raiseException):
with pytest.raises(Exception, match='^Failure condition$'):
events.daemon.trigger('_test.event', None, callback)
with events.bound('_test.event', '_test.handler', eventsHelper._incrementWithResponse):
events.daemon.trigger('_test.event', {'amount': 2}, callback)
assert eventsHelper.ctr == 3
assert eventsHelper.responses == ['foo']
events.daemon.stop()
| {
"content_hash": "c08a1117d40ee23ce016e40bfcbf5e39",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 91,
"avg_line_length": 36.10344827586207,
"alnum_prop": 0.6550143266475644,
"repo_name": "Kitware/girder",
"id": "7e89f08549436c44cf355258d9e76311482eb9f4",
"size": "5259",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1176017"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2018697"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
import time
############################################################
# Classes
############################################################
class Log(object):
"""Pretty print to the console."""
BLUE = "\033[1;34m"
GREEN = "\033[0;32m"
RED = "\033[1;31m"
RESET = "\033[0;0m"
YELLOW = "\033[0;33m"
MAGENTA = "\033[0;35m"
@staticmethod
def info(msg):
Log._print(Log.BLUE, msg)
@staticmethod
def warn(msg):
Log._print(Log.YELLOW, msg)
@staticmethod
def highlight(msg):
Log._print(Log.MAGENTA, msg)
@staticmethod
def success(msg):
Log._print(Log.GREEN, msg)
@staticmethod
def error(msg):
Log._print(Log.RED, msg)
@staticmethod
def _print(color, msg):
# More complete ts: '%Y-%m-%d %H:%M:%S'
ts = datetime.datetime.now().strftime("%H:%M:%S")
print("{}[{}] {}{}".format(color, ts, msg, Log.RESET))
class Paths(object):
"""All the output paths used in this script."""
def __init__(self, root_dir):
if not os.path.isdir(root_dir):
Log.info("Creating root dir [{}].".format(root_dir))
os.makedirs(root_dir)
self.root_dir = root_dir
self.stampede_raw_file = os.path.join(self.root_dir, "stampede.raw.log")
self.stampede_processed_file = os.path.join(
self.root_dir, "stampede.processed.log"
)
self.strace_raw_file = os.path.join(self.root_dir, "strace.raw.log")
self.strace_processed_file = os.path.join(self.root_dir, "strace.processed.log")
self.summary_file = os.path.join(self.root_dir, "summary.log")
def print_info(self):
Log.highlight("Using the following paths:")
Log.highlight(" root_dir=[{}]".format(self.root_dir))
Log.highlight(" stampede_raw=[{}]".format(self.stampede_raw_file))
Log.highlight(" strace_raw=[{}]".format(self.strace_raw_file))
Log.highlight(" stampede_processed=[{}]".format(self.stampede_raw_file))
Log.highlight(" strace_processed=[{}]".format(self.strace_raw_file))
Log.highlight(" summary=[{}]".format(self.summary_file))
############################################################
# Functions
############################################################
def parse_args():
parser = argparse.ArgumentParser(
description="Finds untracked source files in BUCK files."
)
parser.add_argument(
"-d",
"--root_dir",
dest="root_dir",
default=os.path.join(
os.path.expanduser("~"), "tmp/find_undeclared_source_files"
),
help="Root dir where all files will be created.",
)
parser.add_argument(
"build_targets", nargs="+", help="Buck build targets to analyse."
)
parser.add_argument(
"-m",
"--mode",
dest="modes",
action="append",
choices=[
"all",
"run_strace",
"run_stampede",
"process_strace",
"process_stampede",
"summarise",
],
default=None,
help="Mode to run this script in.",
)
parser.add_argument(
"-i",
"--include",
dest="includes",
action="append",
help=(
"If any include dir prefix is passed, then the processed output "
"file will **only** contain paths that start with any of the "
"provided prefixes."
),
)
parser.add_argument(
"-k",
"--known_root",
dest="known_roots",
action="append",
help="Known root directories to resolve strace relative paths.",
)
args = parser.parse_args()
if args.includes:
args.includes = [os.path.realpath(i) for i in args.includes]
if not args.modes:
args.modes = ["all"]
if args.known_roots is None:
args.known_roots = []
args.known_roots.insert(0, os.getcwd())
args.known_roots = unique_list([os.path.realpath(p) for p in args.known_roots])
return args
def unique_list(src_list):
"""Return unique elements of the list maintaining the original order."""
dst_list = []
all_elements = set()
for e in src_list:
if e not in all_elements:
all_elements.add(e)
dst_list.append(e)
return dst_list
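# e.g. unique_list(["b", "a", "b", "c"]) returns ["b", "a", "c"]; used below to
# de-duplicate --known_root arguments while keeping their command-line order.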
def run_cmd(cmd):
"""Run a command on the bash. Raise an exception on error."""
Log.info("Running the following command:")
Log.info(" {0}".format(cmd))
try:
subprocess.check_call(cmd, shell=True)
Log.success("Command finished successfully with code 0.")
except subprocess.CalledProcessError as e:
error = "Failed to run command with exit code [{}]. cmd=[{}]".format(
e.returncode, e.cmd
)
Log.error(error)
raise e
def run_strace(out_file, build_targets):
"""Run a buck build wrapped with strace."""
assert type(build_targets) == list
buck_cmd = (
"buck build {build_targets} "
"--no-cache "
"--config cache.mode=dir "
"--config cache.slb_server_pool="
).format(build_targets=" ".join(build_targets))
cmd = "strace -f -e open -o {out_file} {buck_cmd}".format(
out_file=out_file, buck_cmd=buck_cmd
)
run_cmd(cmd)
def run_stampede(out_file, build_targets):
"""Run stampede build outputing the required source dependencies."""
assert type(build_targets) == list
cells_root_path = os.path.abspath(os.path.dirname(os.getcwd()))
cmd = (
"buck distbuild sourcefiles "
"--cells-root-path {cells_root_path} "
"--output-file {stampede_out} "
"--no-cache "
"--config cache.mode= "
"--config cache.slb_server_pool= {build_targets}"
).format(
stampede_out=out_file,
build_targets=" ".join(build_targets),
cells_root_path=cells_root_path,
)
run_cmd(cmd)
def process_stampede(raw_file, processed_file, includes):
"""Process and sanitise the stampede output file."""
Log.info("Processing file [{}].".format(raw_file))
all_lines = set()
with open(raw_file) as fp:
for line in fp:
path = os.path.realpath(line.strip())
if is_valid_path(path, includes):
all_lines.add(path + "\n")
with open(processed_file, "w") as fp:
fp.writelines(sorted(all_lines))
Log.success(
"[{}] lines were written into [{}].".format(len(all_lines), processed_file)
)
def process_strace(raw_file, processed_file, includes, known_roots):
Log.info("Processing file [{}].".format(raw_file))
pattern = re.compile(r'open\("([^"]+)"')
all_lines = set()
with open(raw_file) as fp:
for line in fp:
match = pattern.search(line)
if match:
path = match.group(1)
if not os.path.isabs(path):
for root in known_roots:
real_path = os.path.realpath(os.path.join(root, path))
if is_valid_path(real_path, includes):
all_lines.add(real_path + "\n")
else:
real_path = os.path.realpath(path)
if is_valid_path(real_path, includes):
all_lines.add(real_path + "\n")
with open(processed_file, "w") as fp:
fp.writelines(sorted(all_lines))
Log.success(
"[{}] lines were written into [{}].".format(len(all_lines), processed_file)
)
def should_run(current_mode, modes_arg):
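    """Return True if current_mode should run given the requested modes."""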
if "all" in modes_arg:
return True
if current_mode in modes_arg:
return True
return False
def is_valid_path(abs_path, includes):
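    """Return True if abs_path is an existing file that passes the ignore and include filters."""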
if not os.path.exists(abs_path) or os.path.isdir(abs_path):
return False
# We don't care about:
# - hidden directories.
# - buck-out.
# - BUCK files.
# - TARGETS files.
# - DEFS files.
if (
"/." in abs_path
or "/buck-out/" in abs_path
or abs_path.endswith("BUCK")
or abs_path.endswith("TARGETS")
or abs_path.endswith("DEFS")
):
return False
if includes:
for include in includes:
if abs_path.startswith(include):
return True
else:
return True
return False
def read_as_set(path):
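    """Read a file and return its lines as a set."""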
with open(path) as fp:
return set(fp.readlines())
def summarise(stampede, strace, summary):
Log.info("Summarising missing files into [{}].".format(summary))
stampede_lines = read_as_set(stampede)
strace_lines = read_as_set(strace)
missing_lines = strace_lines.difference(stampede_lines)
by_extension = {}
for line in missing_lines:
extension = os.path.splitext(line)[1].strip()
if extension in by_extension:
by_extension[extension] += 1
else:
by_extension[extension] = 1
with open(summary, "w") as fp:
fp.write("Total Missing Dependencies: {}\n".format(len(missing_lines)))
fp.write("\n")
fp.write("== Missing Dependencies by File Extension ==\n")
def cmp_by_count(x, y):
return cmp(by_extension[y], by_extension[x])
for extension in sorted(by_extension.keys(), cmp_by_count):
ext = extension if len(extension) > 0 else "(NO_EXTENSION)"
fp.write("{:<15}: {}\n".format(ext, by_extension[extension]))
fp.write("\n")
fp.write("== All Missing Dependencies ==\n")
fp.writelines(sorted(missing_lines))
def confirm_at_repo_root():
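    """Raise an exception if the script is not run from the repository root (where .buckconfig lives)."""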
buckconfig_path = os.path.join(os.getcwd(), ".buckconfig")
if not os.path.isfile(buckconfig_path):
msg = (
"This script must be run from the repository root."
" Could not find [.buckconfig] in CWD [{}]."
).format(os.getcwd())
Log.error(msg)
raise Exception(msg)
############################################################
# Main
############################################################
def main():
args = parse_args()
paths = Paths(args.root_dir)
paths.print_info()
confirm_at_repo_root()
if should_run("run_stampede", args.modes):
run_stampede(paths.stampede_raw_file, args.build_targets)
if should_run("run_strace", args.modes):
run_strace(paths.strace_raw_file, args.build_targets)
if should_run("process_stampede", args.modes):
process_stampede(
paths.stampede_raw_file, paths.stampede_processed_file, args.includes
)
if should_run("process_strace", args.modes):
process_strace(
paths.strace_raw_file,
paths.strace_processed_file,
args.includes,
args.known_roots,
)
if should_run("summarise", args.modes):
Log.info("Summarising...")
summarise(
paths.stampede_processed_file,
paths.strace_processed_file,
paths.summary_file,
)
Log.success("Finished finding undeclared source files successfully.")
if __name__ == "__main__":
main()
| {
"content_hash": "680dd7830aa36f95ad16979ea3b94368",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 88,
"avg_line_length": 31.376044568245124,
"alnum_prop": 0.5563742897727273,
"repo_name": "brettwooldridge/buck",
"id": "f68949dd544e2e3c1d37636a1c0766d615b2b3be",
"size": "11870",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/stampede/find_undeclared_source_files.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1585"
},
{
"name": "Batchfile",
"bytes": "3875"
},
{
"name": "C",
"bytes": "280326"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "18771"
},
{
"name": "CSS",
"bytes": "56106"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Dockerfile",
"bytes": "2081"
},
{
"name": "Go",
"bytes": "9822"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "10916"
},
{
"name": "Haskell",
"bytes": "1008"
},
{
"name": "IDL",
"bytes": "480"
},
{
"name": "Java",
"bytes": "28622919"
},
{
"name": "JavaScript",
"bytes": "938678"
},
{
"name": "Kotlin",
"bytes": "23444"
},
{
"name": "Lex",
"bytes": "7502"
},
{
"name": "MATLAB",
"bytes": "47"
},
{
"name": "Makefile",
"bytes": "1916"
},
{
"name": "OCaml",
"bytes": "4935"
},
{
"name": "Objective-C",
"bytes": "176943"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "2069290"
},
{
"name": "Roff",
"bytes": "1207"
},
{
"name": "Rust",
"bytes": "5214"
},
{
"name": "Scala",
"bytes": "5082"
},
{
"name": "Shell",
"bytes": "76854"
},
{
"name": "Smalltalk",
"bytes": "194"
},
{
"name": "Swift",
"bytes": "11393"
},
{
"name": "Thrift",
"bytes": "47828"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
class ProjectEventDetailsTest(APITestCase, SnubaTestCase):
def setUp(self):
super(ProjectEventDetailsTest, self).setUp()
self.login_as(user=self.user)
project = self.create_project()
one_min_ago = iso_format(before_now(minutes=1))
two_min_ago = iso_format(before_now(minutes=2))
three_min_ago = iso_format(before_now(minutes=3))
four_min_ago = iso_format(before_now(minutes=4))
self.prev_event = self.store_event(
data={"event_id": "a" * 32, "timestamp": four_min_ago, "fingerprint": ["group-1"]},
project_id=project.id,
)
self.cur_event = self.store_event(
data={"event_id": "b" * 32, "timestamp": three_min_ago, "fingerprint": ["group-1"]},
project_id=project.id,
)
self.next_event = self.store_event(
data={
"event_id": "c" * 32,
"timestamp": two_min_ago,
"fingerprint": ["group-1"],
"environment": "production",
"tags": {"environment": "production"},
},
project_id=project.id,
)
# Event in different group
self.store_event(
data={
"event_id": "d" * 32,
"timestamp": one_min_ago,
"fingerprint": ["group-2"],
"environment": "production",
"tags": {"environment": "production"},
},
project_id=project.id,
)
def test_snuba(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.cur_event.event_id,
"project_slug": self.cur_event.project.slug,
"organization_slug": self.cur_event.project.organization.slug,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(self.cur_event.event_id)
assert response.data["nextEventID"] == six.text_type(self.next_event.event_id)
assert response.data["previousEventID"] == six.text_type(self.prev_event.event_id)
assert response.data["groupID"] == six.text_type(self.cur_event.group.id)
def test_snuba_no_prev(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.prev_event.event_id,
"project_slug": self.prev_event.project.slug,
"organization_slug": self.prev_event.project.organization.slug,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(self.prev_event.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] == self.cur_event.event_id
assert response.data["groupID"] == six.text_type(self.prev_event.group.id)
def test_snuba_with_environment(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.cur_event.event_id,
"project_slug": self.cur_event.project.slug,
"organization_slug": self.cur_event.project.organization.slug,
},
)
response = self.client.get(
url, format="json", data={"enable_snuba": "1", "environment": ["production", "staging"]}
)
response = self.client.get(
url, format="json", data={"environment": ["production", "staging"]}
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(self.cur_event.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] == self.next_event.event_id
assert response.data["groupID"] == six.text_type(self.prev_event.group.id)
def test_ignores_different_group(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.next_event.event_id,
"project_slug": self.next_event.project.slug,
"organization_slug": self.next_event.project.organization.slug,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(self.next_event.event_id)
assert response.data["nextEventID"] is None
class ProjectEventJsonEndpointTest(APITestCase, SnubaTestCase):
def setUp(self):
super(ProjectEventJsonEndpointTest, self).setUp()
self.login_as(user=self.user)
self.event_id = "c" * 32
self.fingerprint = ["group_2"]
self.min_ago = iso_format(before_now(minutes=1))
self.event = self.store_event(
data={
"event_id": self.event_id,
"timestamp": self.min_ago,
"fingerprint": self.fingerprint,
"user": {"email": self.user.email},
},
project_id=self.project.id,
)
self.url = reverse(
"sentry-api-0-event-json",
kwargs={
"organization_slug": self.organization.slug,
"project_slug": self.project.slug,
"event_id": self.event_id,
},
)
def assert_event(self, data):
assert data["event_id"] == self.event_id
assert data["user"]["email"] == self.user.email
assert data["datetime"][:19] == self.min_ago
assert data["fingerprint"] == self.fingerprint
def test_simple(self):
response = self.client.get(self.url, format="json")
assert response.status_code == 200, response.content
self.assert_event(response.data)
def test_event_does_not_exist(self):
self.url = reverse(
"sentry-api-0-event-json",
kwargs={
"organization_slug": self.organization.slug,
"project_slug": self.project.slug,
"event_id": "no" * 16,
},
)
response = self.client.get(self.url, format="json")
assert response.status_code == 404, response.content
assert response.data == {"detail": "Event not found"}
def test_user_unauthorized(self):
user = self.create_user()
self.login_as(user)
response = self.client.get(self.url, format="json")
assert response.status_code == 403, response.content
assert response.data == {"detail": "You do not have permission to perform this action."}
def test_project_not_associated_with_event(self):
project2 = self.create_project(organization=self.organization)
url = reverse(
"sentry-api-0-event-json",
kwargs={
"organization_slug": self.organization.slug,
"project_slug": project2.slug,
"event_id": self.event_id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 404, response.content
assert response.data == {"detail": "Event not found"}
| {
"content_hash": "9449b3416eb425d2807b553e1412ff96",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 100,
"avg_line_length": 39.27461139896373,
"alnum_prop": 0.5688654353562005,
"repo_name": "mvaled/sentry",
"id": "c682d498eb8b07f31e84703e7cc64879b68fa751",
"size": "7580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/snuba/api/endpoints/test_project_event_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import frappe
import unittest
class TestSMSSettings(unittest.TestCase):
pass
| {
"content_hash": "962ee84573724c68736c186aa42b9b71",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 41,
"avg_line_length": 15.8,
"alnum_prop": 0.8354430379746836,
"repo_name": "mhbu50/frappe",
"id": "b3be912f9e1fc9e36198d540252a0ffb79528046",
"size": "190",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/sms_settings/test_sms_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
import struct
__author__ = 'tom1231'
from BAL.Header.RiCHeader import RiCHeader
RES_ID = 101
class ConnectionResponse(RiCHeader):
def dataTosend(self):
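        """Append the connection flag byte to the base header payload."""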
return RiCHeader.dataTosend(self) + struct.pack('<?', self._toConnect)
def __init__(self, toConnect):
RiCHeader.__init__(self)
self._id = RES_ID
self._des = 0x1001
self._length = 7
self._checkSum = 0
self._toConnect = toConnect
self._checkSum = self.calCheckSum(self.dataTosend())
| {
"content_hash": "d178ac5597a88631e17dbe78c1182a47",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 21.416666666666668,
"alnum_prop": 0.6206225680933852,
"repo_name": "robotican/ric",
"id": "db330975f1745efe66743590ae34fbe74cb5c926",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/indigo-devel",
"path": "ric_board/scripts/RiCTraffic/BAL/Header/Response/ConnectionResponse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "142621"
},
{
"name": "C",
"bytes": "39802"
},
{
"name": "C#",
"bytes": "316699"
},
{
"name": "C++",
"bytes": "3208133"
},
{
"name": "CMake",
"bytes": "10309"
},
{
"name": "Objective-C",
"bytes": "2102"
},
{
"name": "Processing",
"bytes": "20189"
},
{
"name": "Python",
"bytes": "12216967"
},
{
"name": "Shell",
"bytes": "7029"
}
],
"symlink_target": ""
} |
import crispy_forms
from setuptools import setup, find_packages
tests_require = [
'Django>=1.3,<1.8',
]
setup(
name='django-crispy-forms',
version=crispy_forms.__version__,
description="Best way to have Django DRY forms",
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
extras_require={
'tests': tests_require,
},
keywords=['forms', 'django', 'crispy', 'DRY'],
author='Miguel Araujo',
author_email='[email protected]',
url='http://github.com/maraujop/django-crispy-forms',
license='MIT',
packages=find_packages(exclude=['docs']),
include_package_data=True,
zip_safe=False,
)
| {
"content_hash": "a2a804f23aaeb8f362278b935fe2ef44",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 71,
"avg_line_length": 31.75,
"alnum_prop": 0.6110236220472441,
"repo_name": "zixan/django-crispy-forms",
"id": "79444dd90f91e18c47a6e28909c674c78388baea",
"size": "1270",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "44357"
},
{
"name": "Makefile",
"bytes": "187"
},
{
"name": "Python",
"bytes": "177614"
}
],
"symlink_target": ""
} |
from .ApplicationConfiguration import ApplicationConfiguration
import importlib
class PersistentImageManager(object):
""" Abstract base class for the Persistence managers """
_default_manager = None
@classmethod
def default_manager(cls):
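        """Return the singleton persistence manager, instantiating it from the application configuration on first use."""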
if not cls._default_manager:
appconfig = ApplicationConfiguration().configuration
class_name = appconfig['image_manager'].capitalize() + "PersistentImageManager"
kwargs = appconfig['image_manager_args']
# The current defaults are 'file' for class name and
# { "storage_location": "/var/lib/imagefactory/storage" } for the args
pim_module = importlib.import_module( ".." + class_name, __name__)
pim_class = getattr(pim_module, class_name)
cls._default_manager = pim_class(**kwargs)
return cls._default_manager
def __init__(self, storage_path = None):
raise NotImplementedError("PersistentImageManager is an abstract class. You must instantiate a real manager.")
def image_with_id(self, image_id):
"""
        Return the persisted image object with the given image_id.
        @param image_id identifier of the image to retrieve
        @return the matching image object
"""
raise NotImplementedError("image_with_id() not implemented - cannot continue")
def images_from_query(self, query):
"""
        Return all persisted images matching the given query.
        @param query dictionary of field/value constraints to match
        @return list of matching image objects
"""
raise NotImplementedError("images_from_query() not implemented - cannot continue")
def add_image(self, image):
"""
        Add a new image object to the persistent store.
        @param image image object to persist
"""
raise NotImplementedError("add_image() not implemented - cannot continue")
def save_image(self, image):
"""
        Save the current state of an existing image object.
        @param image image object to save
"""
raise NotImplementedError("save_image() not implemented - cannot continue")
def delete_image_with_id(self, image_id):
"""
        Delete the persisted image with the given image_id.
        @param image_id identifier of the image to delete
"""
raise NotImplementedError("delete_image_with_id() not implemented - cannot continue")
| {
"content_hash": "801d4f354d055dccd8877e701bdd1fb9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 119,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6184615384615385,
"repo_name": "redhat-imaging/imagefactory",
"id": "b5a0ce8e7130ada74d8536fcd969d1a0cc73c2d1",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imgfac/PersistentImageManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1015"
},
{
"name": "Python",
"bytes": "819421"
},
{
"name": "Shell",
"bytes": "4171"
}
],
"symlink_target": ""
} |
import logging
import tempfile
from uuid import uuid4
from mongo_orchestration.container import Container
from mongo_orchestration.errors import ShardedClusterError
from mongo_orchestration.servers import Servers
from mongo_orchestration.replica_sets import ReplicaSets
from mongo_orchestration.singleton import Singleton
from pymongo import MongoClient
logger = logging.getLogger(__name__)
class ShardedCluster(object):
"""class represents Sharding configuration"""
def __init__(self, params):
"""init configuration acording params"""
self.id = params.get('id', None) or str(uuid4())
self.login = params.get('login', '')
self.password = params.get('password', '')
self.auth_key = params.get('auth_key', None)
self._version = params.get('version')
self._configsvrs = []
self._routers = []
self._shards = {}
self.tags = {}
self.sslParams = params.get('sslParams', {})
self.kwargs = {}
        if self.sslParams:
self.kwargs['ssl'] = True
self.__init_configsvr(params.get('configsvrs', [{}]))
for r in params.get('routers', [{}]):
self.router_add(r)
for cfg in params.get('shards', []):
shard_params = cfg.get('shardParams', {})
shard_tags = shard_params.pop('tags', None)
info = self.member_add(cfg.get('id', None), shard_params)
if shard_tags:
self.tags[info['id']] = shard_tags
if self.tags:
for sh_id in self.tags:
logger.debug('Add tags %r to %s' % (self.tags[sh_id], sh_id))
self.connection().config.shards.update(
{'_id': sh_id},
{'$addToSet': {'$each': self.tags[sh_id]}})
if self.login:
client = MongoClient(self.router['hostname'], **self.kwargs)
client.admin.add_user(self.login, self.password,
roles=['__system',
'clusterAdmin',
'dbAdminAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase'])
def __init_configsvr(self, params):
"""create and start config servers"""
self._configsvrs = []
for cfg in params:
cfg.update({'configsvr': True})
self._configsvrs.append(Servers().create(
'mongod', cfg,
sslParams=self.sslParams, autostart=True,
auth_key=self.auth_key, version=self._version))
def __len__(self):
return len(self._shards)
@property
def configsvrs(self):
"""return list of config servers"""
return [{'id': h_id, 'hostname': Servers().hostname(h_id)} for h_id in self._configsvrs]
@property
def routers(self):
"""return list of routers"""
return [{'id': h_id, 'hostname': Servers().hostname(h_id)} for h_id in self._routers]
@property
def members(self):
"""return list of members"""
# return [{'id': shard, 'hostname': Servers().hostname(info['_id'])} for shard, info in self._shards.items()]
return [self.member_info(item) for item in self._shards]
@property
def router(self):
"""return first available router"""
for server in self._routers:
info = Servers().info(server)
if info['procInfo'].get('alive', False):
return {'id': server, 'hostname': Servers().hostname(server)}
def router_add(self, params):
"""add new router (mongos) into existing configuration"""
cfgs = ','.join([Servers().info(item)['uri'] for item in self._configsvrs])
params.update({'configdb': cfgs})
self._routers.append(Servers().create(
'mongos', params, sslParams=self.sslParams, autostart=True,
auth_key=self.auth_key, version=self._version))
return {'id': self._routers[-1], 'hostname': Servers().hostname(self._routers[-1])}
def connection(self):
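        """Return a MongoClient connected to the first available router, authenticating when credentials are set."""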
c = MongoClient(self.router['hostname'], **self.kwargs)
try:
self.login and self.password and c.admin.authenticate(self.login, self.password)
except:
pass
return c
def router_command(self, command, arg=None, is_eval=False):
"""run command on the router server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result
"""
mode = is_eval and 'eval' or 'command'
if isinstance(arg, tuple):
name, d = arg
else:
name, d = arg, {}
result = getattr(self.connection().admin, mode)(command, name, **d)
return result
def router_remove(self, router_id):
"""remove """
result = Servers().remove(router_id)
        del self._routers[self._routers.index(router_id)]
return { "ok": 1, "routers": self._routers }
def _add(self, shard_uri, name):
"""execute addShard command"""
return self.router_command("addShard", (shard_uri, {"name": name}), is_eval=False)
def member_add(self, member_id=None, params=None):
"""add new member into existing configuration"""
member_id = member_id or str(uuid4())
if 'members' in params:
# is replica set
rs_params = params.copy()
rs_params.update({'auth_key': self.auth_key})
rs_params.update({'sslParams': self.sslParams})
if self.login and self.password:
rs_params.update({'login': self.login, 'password': self.password})
if self._version:
rs_params['version'] = self._version
rs_id = ReplicaSets().create(rs_params)
members = ReplicaSets().members(rs_id)
cfgs = rs_id + r"/" + ','.join([item['host'] for item in members])
result = self._add(cfgs, member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isReplicaSet': True, '_id': rs_id}
# return self._shards[result['shardAdded']].copy()
return self.member_info(member_id)
else:
# is single server
params.update({'autostart': True, 'auth_key': self.auth_key, 'sslParams': self.sslParams})
params['procParams'] = params.get('procParams', {})
if self._version:
params['version'] = self._version
logger.debug("servers create params: {params}".format(**locals()))
server_id = Servers().create('mongod', **params)
result = self._add(Servers().info(server_id)['uri'], member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isServer': True, '_id': server_id}
# return self._shards[result['shardAdded']]
return self.member_info(member_id)
def member_info(self, member_id):
"""return info about member"""
info = self._shards[member_id].copy()
info['id'] = member_id
info['tags'] = self.tags.get(member_id, list())
return info
def _remove(self, shard_name):
"""remove member from configuration"""
result = self.router_command("removeShard", shard_name, is_eval=False)
if result['ok'] == 1 and result['state'] == 'completed':
shard = self._shards.pop(shard_name)
if shard.get('isServer', False):
Servers().remove(shard['_id'])
if shard.get('isReplicaSet', False):
ReplicaSets().remove(shard['_id'])
return result
def member_remove(self, member_id):
"""remove member from configuration"""
return self._remove(member_id)
def reset(self):
"""Ensure all shards, configs, and routers are running and available."""
# Ensure all shards by calling "reset" on each.
for shard_id in self._shards:
if self._shards[shard_id].get('isReplicaSet'):
singleton = ReplicaSets()
elif self._shards[shard_id].get('isServer'):
singleton = Servers()
singleton.command(self._shards[shard_id]['_id'], 'reset')
# Ensure all config servers by calling "reset" on each.
for config_id in self._configsvrs:
Servers().command(config_id, 'reset')
# Ensure all routers by calling "reset" on each.
for router_id in self._routers:
Servers().command(router_id, 'reset')
return self.info()
def info(self):
"""return info about configuration"""
uri = ','.join(x['hostname'] for x in self.routers)
mongodb_uri = 'mongodb://' + uri
return {'id': self.id,
'shards': self.members,
'configsvrs': self.configsvrs,
'routers': self.routers,
'uri': uri,
'mongodb_uri': mongodb_uri,
'orchestration': 'sharded_clusters'}
def cleanup(self):
"""cleanup configuration: stop and remove all servers"""
for _id, shard in self._shards.items():
if shard.get('isServer', False):
Servers().remove(shard['_id'])
if shard.get('isReplicaSet', False):
ReplicaSets().remove(shard['_id'])
for mongos in self._routers:
Servers().remove(mongos)
for configsvr in self._configsvrs:
Servers().remove(configsvr)
self._configsvrs = []
self._routers = []
self._shards = {}
class ShardedClusters(Singleton, Container):
""" ShardedClusters is a dict-like collection for ShardedCluster objects"""
_name = 'shards'
_obj_type = ShardedCluster
releases = {}
pids_file = tempfile.mktemp(prefix="mongo-")
def set_settings(self, releases=None, default_release=None):
"""set path to storage"""
super(ShardedClusters, self).set_settings(releases, default_release)
ReplicaSets().set_settings(releases, default_release)
def __getitem__(self, key):
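        """Return the info dictionary for the cluster with the given id."""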
return self.info(key)
def cleanup(self):
"""remove all servers with their data"""
for server in self:
self.remove(server)
def create(self, params):
"""create new ShardedCluster
Args:
params - dictionary with specific params for instance
Return cluster_id
where cluster_id - id which can use to take the cluster from servers collection
"""
sh_id = params.get('id', str(uuid4()))
if sh_id in self:
raise ShardedClusterError(
"Sharded cluster with id %s already exists." % sh_id)
params['id'] = sh_id
cluster = ShardedCluster(params)
self[cluster.id] = cluster
return cluster.id
def remove(self, cluster_id):
"""remove cluster and data stuff
Args:
cluster_id - cluster identity
"""
cluster = self._storage.pop(cluster_id)
cluster.cleanup()
def info(self, cluster_id):
"""return dictionary object with info about cluster
Args:
cluster_id - cluster identity
"""
return self._storage[cluster_id].info()
def configsvrs(self, cluster_id):
"""return list of config servers"""
return self._storage[cluster_id].configsvrs
def routers(self, cluster_id):
"""return list of routers"""
return self._storage[cluster_id].routers
def router_add(self, cluster_id, params):
"""add new router"""
cluster = self._storage[cluster_id]
result = cluster.router_add(params)
self._storage[cluster_id] = cluster
return result
def router_del(self, cluster_id, router_id):
"""remove router from the ShardedCluster"""
cluster = self._storage[cluster_id]
result = cluster.router_remove(router_id)
self._storage[cluster_id] = cluster
return result
def members(self, cluster_id):
"""return list of members"""
return self._storage[cluster_id].members
def member_info(self, cluster_id, member_id):
"""return info about member"""
cluster = self._storage[cluster_id]
return cluster.member_info(member_id)
def command(self, cluster_id, command, *args):
"""Call a ShardedCluster method."""
cluster = self._storage[cluster_id]
try:
return getattr(cluster, command)(*args)
except AttributeError:
raise ValueError("Cannot issue the command %r to ShardedCluster %s"
% (command, cluster_id))
def member_del(self, cluster_id, member_id):
"""remove member from cluster cluster"""
cluster = self._storage[cluster_id]
result = cluster.member_remove(member_id)
self._storage[cluster_id] = cluster
return result
def member_add(self, cluster_id, params):
"""add new member into configuration"""
cluster = self._storage[cluster_id]
result = cluster.member_add(params.get('id', None), params.get('shardParams', {}))
self._storage[cluster_id] = cluster
return result
| {
"content_hash": "f4909f5149a5bb55f471f5938cb3fb5d",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 117,
"avg_line_length": 37.904225352112675,
"alnum_prop": 0.5682223543400713,
"repo_name": "jyemin/mongo-orchestration",
"id": "34da95531da4deb2bfe5514f0b84051bfb0d7979",
"size": "14068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongo_orchestration/sharded_clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
def seek_and_read(file_name, buf_size, byte_number):
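    """Seek to byte_number in file_name and read up to buf_size bytes."""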
with open(file_name) as f:
f.seek(byte_number)
buf = f.read(buf_size)
return buf
def main():
buf_size = 48
byte_number = 6
print seek_and_read(
'./files_random_access_input_output.py',
buf_size,
byte_number)
if __name__ == '__main__':
main()
| {
"content_hash": "c80503477534e486bbb97203e21a581f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 52,
"avg_line_length": 22.5625,
"alnum_prop": 0.556786703601108,
"repo_name": "adsznzhang/learntosolveit",
"id": "a98903ea8a69b75c90513e10b6a2336801bcca9d",
"size": "361",
"binary": false,
"copies": "3",
"ref": "refs/heads/version1",
"path": "languages/python/files_random_access_input_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "284320"
},
{
"name": "CSS",
"bytes": "3038"
},
{
"name": "HTML",
"bytes": "6727"
},
{
"name": "Java",
"bytes": "138605"
},
{
"name": "JavaScript",
"bytes": "722"
},
{
"name": "Makefile",
"bytes": "3889"
},
{
"name": "Python",
"bytes": "156544"
},
{
"name": "Ruby",
"bytes": "4290"
},
{
"name": "Scala",
"bytes": "8545"
}
],
"symlink_target": ""
} |
"""
IdleBot
Copyright (c) 2015, kernelpanic3, Sorch & JeDa
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of IdleBot nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__version__ = "1.0"
import signal
import sys
import bottom
import asyncio
from bin import peewee as p
from bin import stasis as s
database = p.SqliteDatabase('idlebot.db', threadlocals=True)
class BaseModel(p.Model):
class Meta:
database = database
class User(BaseModel):
id = p.IntegerField(primary_key=True)
name = p.CharField()
role = p.CharField()
level = p.IntegerField()
password = p.CharField()
class Meta:
db_table = "users"
try:
User.create_table(True)
except:
pass
# Be sure the user is running the correct version.
if sys.version_info < (3, 4):
print('fatal: you need Python 3.4 or newer to run IdleBot')
sys.exit(1)
# Parse our config...
try:
import config as C
except ImportError:
print('fatal: please actually configure IdleBot *before* running it')
sys.exit(1)
except Exception as err:
print('fatal: exception raised parsing configuration: {}'.format(err))
sys.exit(1)
if C.IANCP:
print('fatal: please configure IdleBot properly')
sys.exit(1)
# Basic routines (connect, ping, etc.)
# Tell library where to connect and what to do
bot = bottom.Client(C.HOST, C.PORT, 'UTF-8', C.USE_SSL)
@bot.on('CLIENT_CONNECT')
def connect():
bot.send('NICK', nick=C.NICK)
bot.send('USER', user=C.NICK, realname=C.REALNAME)
# Sleep for 5 so we can register with the server before joining
yield from asyncio.sleep(5)
bot.send('JOIN', channel=C.CHANNEL)
# We are alive!
@bot.on('PING')
def keepalive(message):
bot.send('PONG', message=message)
# Auto-reconnect if connection dies
@bot.on('client_disconnect')
def reconnect():
# Snooze briefly and then attempt to reconnect...
yield from asyncio.sleep(10)
yield from bot.connect()
# Generic message sending routines
# For now this just echoes anything you send it as a test...
@bot.on('PRIVMSG')
def message(nick, target, message):
''' Echo all messages '''
# Don't echo ourselves
if nick == C.NICK:
return
# Direct message to bot
if target == C.NICK:
bot.send("PRIVMSG", target=nick, message=message)
# Message in channel
else:
bot.send("PRIVMSG", target=target, message=message)
# Signal handling
def signal_handler(signum, frame):
signals = dict((getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') and '_' not in n )
print('\n{} received.\n'.format(signals[signum]))
bot.send("QUIT", message="{}".format(signals[signum]))
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
asyncio.get_event_loop().run_until_complete(bot.run(True))
| {
"content_hash": "1887c6748a5a981fdda9b9cac8396cbe",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 105,
"avg_line_length": 33.28125,
"alnum_prop": 0.7248826291079812,
"repo_name": "kernelpanic3/IdleBot",
"id": "2083c1841b3ec1a2441b18dae334b107892bd07b",
"size": "4260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idlebot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "201228"
}
],
"symlink_target": ""
} |
import subprocess as sp
import tempfile as tmp
import cfl
import os
def bart(nargout, cmd, *args):
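    """Run the given bart command on the input arrays and return nargout output arrays read back from cfl files."""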
if type(nargout) != int or nargout < 0:
print("Usage: bart(<nargout>, <command>, <arguements...>)");
return None
    bart_path = os.environ.get('TOOLBOX_PATH')
if not bart_path:
if os.path.isfile('/usr/local/bin/bart'):
bart_path = '/usr/local/bin'
elif os.path.isfile('/usr/bin/bart'):
bart_path = '/usr/bin'
else:
raise Exception('Environment variable TOOLBOX_PATH is not set.')
name = tmp.NamedTemporaryFile().name
nargin = len(args);
infiles = [name + 'in' + str(idx) for idx in range(nargin)]
in_str = ' '.join(infiles)
for idx in range(nargin):
cfl.writecfl(infiles[idx], args[idx])
outfiles = [name + 'out' + str(idx) for idx in range(nargout)]
out_str = ' '.join(outfiles)
#TODO: Windows option.
ERR = os.system(bart_path + '/bart ' + cmd + ' ' + in_str + ' ' + out_str);
for elm in infiles:
if os.path.isfile(elm + '.cfl'):
os.remove(elm + '.cfl')
if os.path.isfile(elm + '.hdr'):
os.remove(elm + '.hdr')
output = []
for idx in range(nargout):
elm = outfiles[idx]
if not ERR:
output.append(cfl.readcfl(elm))
if os.path.isfile(elm + '.cfl'):
os.remove(elm + '.cfl')
if os.path.isfile(elm + '.hdr'):
os.remove(elm + '.hdr')
if ERR:
raise Exception("Command exited with an error.")
if nargout == 1:
output = output[0]
return output
| {
"content_hash": "e57a69bea8f2160856729aaede5078eb",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 27.71186440677966,
"alnum_prop": 0.5492354740061162,
"repo_name": "mjacob75/bart",
"id": "442bf80b102f362c80347d50cdcd9521a8fcea00",
"size": "1880",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/bart.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1254305"
},
{
"name": "C++",
"bytes": "2290"
},
{
"name": "CMake",
"bytes": "46995"
},
{
"name": "Cuda",
"bytes": "127993"
},
{
"name": "Makefile",
"bytes": "31083"
},
{
"name": "Matlab",
"bytes": "5876"
},
{
"name": "Python",
"bytes": "17322"
},
{
"name": "Shell",
"bytes": "9374"
}
],
"symlink_target": ""
} |
import time
import uuid
def get_random_id():
#NOTE: It is very important that these random IDs NOT start with a number.
random_id = '_' + uuid.uuid4().hex
return random_id
def get_time_string(delta=0):
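    """Return the current UTC time plus delta seconds, formatted as an ISO 8601 timestamp."""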
return time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime(time.time() + delta))
| {
"content_hash": "27d19d7e8c1310f8f840da3f34dc8908",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 29.6,
"alnum_prop": 0.6655405405405406,
"repo_name": "unomena/django-saml2-sp",
"id": "a7e507beae60c74490deb54e95fd1e4378c4a692",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saml2sp/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19431"
}
],
"symlink_target": ""
} |
"""
Support for the Withings API.
For more details about this platform, please refer to the documentation at
"""
import voluptuous as vol
from withings_api import WithingsAuth
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers import config_validation as cv, config_entry_oauth2_flow
from . import config_flow, const
from .common import _LOGGER, get_data_manager, NotAuthenticatedError
DOMAIN = const.DOMAIN
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(const.CLIENT_ID): vol.All(cv.string, vol.Length(min=1)),
vol.Required(const.CLIENT_SECRET): vol.All(
cv.string, vol.Length(min=1)
),
vol.Required(const.PROFILES): vol.All(
cv.ensure_list,
vol.Unique(),
vol.Length(min=1),
[vol.All(cv.string, vol.Length(min=1))],
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the Withings component."""
conf = config.get(DOMAIN, {})
if not conf:
return True
hass.data[DOMAIN] = {const.CONFIG: conf}
config_flow.WithingsFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
const.DOMAIN,
conf[const.CLIENT_ID],
conf[const.CLIENT_SECRET],
f"{WithingsAuth.URL}/oauth2_user/authorize2",
f"{WithingsAuth.URL}/oauth2/token",
),
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up Withings from a config entry."""
# Upgrading existing token information to hass managed tokens.
if "auth_implementation" not in entry.data:
_LOGGER.debug("Upgrading existing config entry")
data = entry.data
creds = data.get(const.CREDENTIALS, {})
hass.config_entries.async_update_entry(
entry,
data={
"auth_implementation": const.DOMAIN,
"implementation": const.DOMAIN,
"profile": data.get("profile"),
"token": {
"access_token": creds.get("access_token"),
"refresh_token": creds.get("refresh_token"),
"expires_at": int(creds.get("token_expiry")),
"type": creds.get("token_type"),
"userid": creds.get("userid") or creds.get("user_id"),
},
},
)
implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
data_manager = get_data_manager(hass, entry, implementation)
_LOGGER.debug("Confirming we're authenticated")
try:
await data_manager.check_authenticated()
except NotAuthenticatedError:
_LOGGER.error(
"Withings auth tokens exired for profile %s, remove and re-add the integration",
data_manager.profile,
)
return False
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload Withings config entry."""
return await hass.config_entries.async_forward_entry_unload(entry, "sensor")
| {
"content_hash": "c28934f0186f47d09e45a50ea0ef52ea",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 92,
"avg_line_length": 32.60909090909091,
"alnum_prop": 0.5996654586005018,
"repo_name": "qedi-r/home-assistant",
"id": "482c4e96e5cec54a78b816247495c948fde1f30b",
"size": "3587",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/withings/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""
Settings for the ctcf_peaks example script
"""
import gffutils
import metaseq
UPSTREAM = 1000
DOWNSTREAM = 1000
BINS = 100
FRAGMENT_SIZE = 200
GENOME = 'hg19'
CHROMS = ['chr1', 'chr2']
gtfdb = metaseq.example_filename('Homo_sapiens.GRCh37.66.cleaned.gtf.db')
G = gffutils.FeatureDB(gtfdb)
| {
"content_hash": "da635737874b64e2ec2584634788a8d3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7322033898305085,
"repo_name": "agrimaldi/metaseq",
"id": "b22b94bd1ba91018c5d360c6edb1076bb4b81fbd",
"size": "295",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "metaseq/test/examples/atf3_peaks_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "237212"
},
{
"name": "R",
"bytes": "661"
},
{
"name": "Shell",
"bytes": "28947"
}
],
"symlink_target": ""
} |
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of13']
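# instruction_id TLV classes; each concrete subclass registers itself in its
# parent's subtypes map so unpack() can dispatch on the wire type/subtype.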
class instruction_id(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = instruction_id.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = instruction_id()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("instruction_id {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class apply_actions(instruction_id):
type = 4
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = apply_actions()
_type = reader.read("!H")[0]
assert(_type == 4)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("apply_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[4] = apply_actions
class experimenter(instruction_id):
subtypes = {}
type = 65535
def __init__(self, experimenter=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[65535] = experimenter
class bsn(experimenter):
subtypes = {}
type = 65535
experimenter = 6035143
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = bsn.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn
class bsn_arp_offload(bsn):
type = 65535
experimenter = 6035143
subtype = 1
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_arp_offload()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_arp_offload {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[1] = bsn_arp_offload
class bsn_auto_negotiation(bsn):
type = 65535
experimenter = 6035143
subtype = 11
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_auto_negotiation()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 11)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_auto_negotiation {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[11] = bsn_auto_negotiation
class bsn_deny(bsn):
type = 65535
experimenter = 6035143
subtype = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_deny()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 5)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_deny {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[5] = bsn_deny
class bsn_dhcp_offload(bsn):
type = 65535
experimenter = 6035143
subtype = 2
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_dhcp_offload()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_dhcp_offload {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[2] = bsn_dhcp_offload
class bsn_disable_l3(bsn):
type = 65535
experimenter = 6035143
subtype = 13
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_l3()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 13)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_l3 {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[13] = bsn_disable_l3
class bsn_disable_split_horizon_check(bsn):
type = 65535
experimenter = 6035143
subtype = 3
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_split_horizon_check()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_split_horizon_check {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[3] = bsn_disable_split_horizon_check
class bsn_disable_src_mac_check(bsn):
type = 65535
experimenter = 6035143
subtype = 0
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_src_mac_check()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 0)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_src_mac_check {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[0] = bsn_disable_src_mac_check
class bsn_disable_vlan_counters(bsn):
type = 65535
experimenter = 6035143
subtype = 9
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_vlan_counters()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 9)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_vlan_counters {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[9] = bsn_disable_vlan_counters
class bsn_internal_priority(bsn):
type = 65535
experimenter = 6035143
subtype = 12
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_internal_priority()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 12)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_internal_priority {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[12] = bsn_internal_priority
class bsn_packet_of_death(bsn):
type = 65535
experimenter = 6035143
subtype = 6
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_packet_of_death()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_packet_of_death {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[6] = bsn_packet_of_death
class bsn_permit(bsn):
type = 65535
experimenter = 6035143
subtype = 4
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_permit()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_permit {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[4] = bsn_permit
class bsn_prioritize_pdus(bsn):
type = 65535
experimenter = 6035143
subtype = 7
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_prioritize_pdus()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 7)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_prioritize_pdus {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[7] = bsn_prioritize_pdus
class bsn_require_vlan_xlate(bsn):
type = 65535
experimenter = 6035143
subtype = 8
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_require_vlan_xlate()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 8)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_require_vlan_xlate {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[8] = bsn_require_vlan_xlate
class bsn_span_destination(bsn):
type = 65535
experimenter = 6035143
subtype = 10
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_span_destination()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 10)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_span_destination {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[10] = bsn_span_destination
class clear_actions(instruction_id):
type = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = clear_actions()
_type = reader.read("!H")[0]
assert(_type == 5)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("clear_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[5] = clear_actions
class goto_table(instruction_id):
type = 1
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = goto_table()
_type = reader.read("!H")[0]
assert(_type == 1)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("goto_table {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[1] = goto_table
class meter(instruction_id):
type = 6
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = meter()
_type = reader.read("!H")[0]
assert(_type == 6)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("meter {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[6] = meter
class write_actions(instruction_id):
type = 3
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_actions()
_type = reader.read("!H")[0]
assert(_type == 3)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("write_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[3] = write_actions
class write_metadata(instruction_id):
type = 2
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_metadata()
_type = reader.read("!H")[0]
assert(_type == 2)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("write_metadata {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[2] = write_metadata
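# Illustrative sketch, not part of the generated module: every instruction id
# above serializes the same OpenFlow TLV header -- a 16-bit type followed by a
# 16-bit length that is back-patched once the total payload size is known
# (that is what the "placeholder for len at index 1" lines do).  A standalone
# equivalent using only struct, assuming the same header layout:
#
#     import struct
#
#     def pack_instruction_header(type_):
#         packed = [struct.pack("!H", type_), struct.pack("!H", 0)]
#         packed[1] = struct.pack("!H", sum(len(x) for x in packed))
#         return b''.join(packed)
#
#     pack_instruction_header(5)  # clear_actions -> b'\x00\x05\x00\x04'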
| {
"content_hash": "7614ab3144da98f79ca41fac85b97452",
"timestamp": "",
"source": "github",
"line_count": 1036,
"max_line_length": 76,
"avg_line_length": 27.75096525096525,
"alnum_prop": 0.5354782608695652,
"repo_name": "opencord/voltha",
"id": "c5f0ca6df18cdf5b1b9064295b904ebd3f7f6ea0",
"size": "29700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ofagent/loxi/of13/instruction_id.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30265"
},
{
"name": "Dockerfile",
"bytes": "2881"
},
{
"name": "Go",
"bytes": "181529"
},
{
"name": "Jinja",
"bytes": "25855"
},
{
"name": "Makefile",
"bytes": "76329"
},
{
"name": "Python",
"bytes": "9758796"
},
{
"name": "RobotFramework",
"bytes": "10188"
},
{
"name": "Ruby",
"bytes": "1126"
},
{
"name": "Shell",
"bytes": "758475"
},
{
"name": "XSLT",
"bytes": "175917"
}
],
"symlink_target": ""
} |
import warnings
from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas._libs import tslibs
from pandas._libs.tslibs import resolution
from pandas.core.dtypes.common import (
is_float, is_integer,
is_integer_dtype,
is_float_dtype,
is_datetime64_ns_dtype,
is_period_arraylike,
is_nested_list_like
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.compat import lrange
import pandas.compat as compat
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.indexes.datetimes import date_range
import pandas.core.tools.datetimes as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex
# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
_WARN = True # Global for whether pandas has registered the units explicitly
_mpl_units = {} # Cache for units overwritten by us
def get_pairs():
pairs = [
(tslibs.Timestamp, DatetimeConverter),
(Period, PeriodConverter),
(pydt.datetime, DatetimeConverter),
(pydt.date, DatetimeConverter),
(pydt.time, TimeConverter),
(np.datetime64, DatetimeConverter),
]
return pairs
def register(explicit=True):
"""Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
    deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter
def deregister():
"""Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
# We use type to catch our classes directly, no inheritance
if type(units.registry.get(type_)) is cls:
units.registry.pop(type_)
# restore the old keys
for unit, formatter in _mpl_units.items():
if type(formatter) not in {DatetimeConverter, PeriodConverter,
TimeConverter}:
# make it idempotent by excluding ours.
units.registry[unit] = formatter
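# Illustrative usage from user code, matching the public aliases mentioned in
# the docstrings above (register/deregister are re-exported from
# pandas.plotting with a `_matplotlib_converters` suffix):
#
#     >>> from pandas.plotting import (register_matplotlib_converters,
#     ...                              deregister_matplotlib_converters)
#     >>> register_matplotlib_converters()    # install pandas' converters
#     >>> # ... plot Timestamp / Period / datetime data with matplotlib ...
#     >>> deregister_matplotlib_converters()  # restore matplotlib's originals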
def _check_implicitly_registered():
global _WARN
if _WARN:
msg = ("Using an implicitly registered datetime converter for a "
"matplotlib plotting method. The converter was registered "
"by pandas on import. Future versions of pandas will require "
"you to explicitly register matplotlib converters.\n\n"
"To register the converters:\n\t"
">>> from pandas.plotting import register_matplotlib_converters"
"\n\t"
">>> register_matplotlib_converters()")
warnings.warn(msg, FutureWarning)
_WARN = False
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time {d}'.format(d=d))
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
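# Illustrative example of the conversion above: a time of day becomes seconds
# since midnight, the same scale TimeFormatter below turns back into labels.
#
#     >>> time2num(pydt.time(12, 34, 56))
#     45296.0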
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or is_integer(value) or
is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
def __call__(self, x, pos=0):
"""
Return the time of day as a formatted string.
Parameters
----------
x : float
The time of day specified as seconds since 00:00 (midnight),
with up to microsecond precision.
pos
Unused
Returns
-------
str
A string in HH:MM:SS.mmmuuu format. Microseconds,
milliseconds and seconds are only displayed if non-zero.
"""
fmt = '%H:%M:%S.%f'
s = int(x)
msus = int(round((x - s) * 1e6))
ms = msus // 1000
us = msus % 1000
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
if us != 0:
return pydt.time(h, m, s, msus).strftime(fmt)
elif ms != 0:
return pydt.time(h, m, s, msus).strftime(fmt)[:-3]
elif s != 0:
return pydt.time(h, m, s).strftime('%H:%M:%S')
return pydt.time(h, m).strftime('%H:%M')
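# Illustrative examples of the precision rules described in the docstring
# above (values are seconds since midnight):
#
#     >>> fmt = TimeFormatter(locs=[])
#     >>> fmt(45296.5)   # 12h 34m 56.5s -> milliseconds shown, microseconds dropped
#     '12:34:56.500'
#     >>> fmt(3600.0)    # whole hour -> seconds and sub-seconds omitted
#     '01:00'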
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if is_nested_list_like(values):
values = [PeriodConverter._convert_1d(v, units, axis)
for v in values]
else:
values = PeriodConverter._convert_1d(values, units, axis)
return values
@staticmethod
def _convert_1d(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time, np.datetime64)
if (isinstance(values, valid_types) or is_integer(values) or
is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq)._ndarray_values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq)._ndarray_values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time, np.datetime64)):
return Period(date, freq).ordinal
elif (is_integer(date) or is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '{date}'".format(date=date))
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, ABCSeries)
) and is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
# values might be a 1-d array, or a list-like of arrays.
_check_implicitly_registered()
if is_nested_list_like(values):
values = [DatetimeConverter._convert_1d(v, unit, axis)
for v in values]
else:
values = DatetimeConverter._convert_1d(values, unit, axis)
return values
@staticmethod
def _convert_1d(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(tslibs.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (is_integer(values) or is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index, ABCSeries)):
if isinstance(values, ABCSeries):
# https://github.com/matplotlib/matplotlib/issues/11391
# Series was skipped. Convert to DatetimeIndex to get asi8
values = Index(values)
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com.asarray_tuplesafe(values)
if is_integer_dtype(values) or is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = _dt_to_float_ordinal(values)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
values = _dt_to_float_ordinal(values)
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
        # matplotlib.dates._UTC lacks the _utcoffset attribute that pandas calls
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
_check_implicitly_registered()
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
_check_implicitly_registered()
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
            # We went through the whole loop without breaking: fall back to 1000ms (1 second)
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate '
'{estimate:d} ticks from {dmin} to {dmax}: '
'exceeds Locator.MAXTICKS'
'* 2 ({arg:d}) ').format(
estimate=estimate, dmin=dmin, dmax=dmax,
arg=self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed,
freq=freq, tz=tz).astype(object)
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
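# Illustrative values, derived directly from the thresholds above
# (returned as (minor_spacing, major_spacing) in years):
#
#     >>> _get_default_annual_spacing(30)      # 20 <= nyears < 50
#     (1, 5)
#     >>> _get_default_annual_spacing(150)     # 100 <= nyears < 200
#     (5, 25)
#     >>> _get_default_annual_spacing(2500)    # factor = 2500 // 1000 + 1 == 3
#     (60, 300)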
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return np.nonzero(current - previous)[0]
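# Illustrative example (uses the PeriodIndex import at the top of this module);
# index 0 is flagged because the period just before the window falls in 1999:
#
#     >>> dates_ = PeriodIndex(['2000-01', '2000-02', '2000-03', '2001-01'],
#     ...                      freq='M')
#     >>> period_break(dates_, 'year')
#     array([0, 3])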
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
    If the minimum view limit is not an exact integer, then the first tick
    label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
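# Illustrative examples of the rule described in the docstring above:
#
#     >>> has_level_label(np.array([0]), vmin=0.5)      # lone label at a clipped first tick
#     False
#     >>> has_level_label(np.array([0, 12]), vmin=0.5)
#     True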
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: {freq}".format(freq=freq))
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif resolution.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_._ndarray_values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1 * dates_.freq).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1 * dates_.freq).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1 * dates_.freq).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 5. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 6. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
    # Case 7. More than 11 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = resolution.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: {freq}".format(freq=freq)
raise NotImplementedError(errmsg)
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
freq : {var}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
_check_implicitly_registered()
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
_check_implicitly_registered()
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
_check_implicitly_registered()
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
class TimeSeries_TimedeltaFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
"""
@staticmethod
def format_timedelta_ticks(x, pos, n_decimals):
"""
        Convert a timedelta, given in nanoseconds, to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s
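    # Illustrative example: tick values for a TimedeltaIndex axis are in
    # nanoseconds, so 90061 seconds renders as one day, 01:01:01:
    #
    #     >>> TimeSeries_TimedeltaFormatter.format_timedelta_ticks(
    #     ...     90061 * 10 ** 9, pos=0, n_decimals=0)
    #     '1 days 01:01:01'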
def __call__(self, x, pos=0):
_check_implicitly_registered()
(vmin, vmax) = tuple(self.axis.get_view_interval())
n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
| {
"content_hash": "034bd58bd74755af4ba3d1b3fbff7e1a",
"timestamp": "",
"source": "github",
"line_count": 1159,
"max_line_length": 79,
"avg_line_length": 33.442622950819676,
"alnum_prop": 0.5568369453044376,
"repo_name": "harisbal/pandas",
"id": "444b742ae706e455f1d37b63b8f827868487f97b",
"size": "38760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/plotting/_converter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14298777"
},
{
"name": "Shell",
"bytes": "28914"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.extension import Extension
import numpy
from Cython.Build import cythonize
setup(
ext_modules = cythonize([
Extension("shared",
["shared.pyx"],
language="c++",
extra_compile_args=["-O0"],
include_dirs=[numpy.get_include()],
libraries=["lbswim"]),
Extension("lb",
["lb.pyx"],
language="c++",
extra_compile_args=["-O0"],
include_dirs=[numpy.get_include()],
libraries=["lbswim"]),
Extension("swimmers",
["swimmers.pyx"],
language="c++",
extra_compile_args=["-O0"],
include_dirs=[numpy.get_include()],
libraries=["lbswim"])
])
)
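# Illustrative build step (assumes liblbswim has already been built and is
# visible to the linker): compile the extension modules in place with
#
#     python setup.py build_ext --inplace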
| {
"content_hash": "371a061f0b5188b5f8c3c25fa5670bc5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 53,
"avg_line_length": 32.96296296296296,
"alnum_prop": 0.451685393258427,
"repo_name": "rupertnash/gpu-swimmers",
"id": "7cabc09354dd4c3c912ed21d46d412d032d0295e",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19914"
},
{
"name": "C++",
"bytes": "2525"
},
{
"name": "CMake",
"bytes": "31886"
},
{
"name": "Cuda",
"bytes": "23206"
},
{
"name": "Makefile",
"bytes": "560"
},
{
"name": "Python",
"bytes": "21185"
}
],
"symlink_target": ""
} |
import datetime
import flask
import json
import pytest
import re
from bs4 import BeautifulSoup
import dash_dangerously_set_inner_html
import dash_flow_example
import dash
from dash import Dash, html, dcc, Input, Output
from dash.exceptions import PreventUpdate
from dash.testing.wait import until
def test_inin003_wildcard_data_attributes(dash_duo):
app = Dash()
test_time = datetime.datetime(2012, 1, 10, 2, 3)
test_date = datetime.date(test_time.year, test_time.month, test_time.day)
attrs = {
"id": "inner-element",
"data-string": "multiple words",
"data-number": 512,
"data-none": None,
"data-date": test_date,
"aria-progress": 5,
}
app.layout = html.Div([html.Div(**attrs)], id="data-element")
dash_duo.start_server(app)
div = dash_duo.find_element("#data-element")
# attribute order is ill-defined - BeautifulSoup will sort them
actual = BeautifulSoup(div.get_attribute("innerHTML"), "lxml").decode()
expected = BeautifulSoup(
"<div "
+ " ".join('{}="{!s}"'.format(k, v) for k, v in attrs.items() if v is not None)
+ "></div>",
"lxml",
).decode()
assert actual == expected, "all attrs are included except None values"
assert dash_duo.get_logs() == []
def test_inin004_no_props_component(dash_duo):
app = Dash()
app.layout = html.Div(
[
dash_dangerously_set_inner_html.DangerouslySetInnerHTML(
"""
<h1>No Props Component</h1>
"""
)
],
id="app",
)
dash_duo.start_server(app)
assert dash_duo.get_logs() == []
assert dash_duo.find_element("h1").text == "No Props Component"
inner = dash_duo.find_element("#app").get_property("innerHTML")
expected = "<div> <h1>No Props Component</h1> </div>"
assert re.sub("\\s+", " ", inner) == expected
def test_inin005_flow_component(dash_duo):
app = Dash()
app.layout = html.Div(
[
dash_flow_example.ExampleReactComponent(
id="react", value="my-value", label="react component"
),
dash_flow_example.ExampleFlowComponent(
id="flow", value="my-value", label="flow component"
),
html.Hr(),
html.Div(id="output"),
]
)
@app.callback(
Output("output", "children"), [Input("react", "value"), Input("flow", "value")]
)
def display_output(react_value, flow_value):
return html.Div(
[
"You have entered {} and {}".format(react_value, flow_value),
html.Hr(),
html.Label("Flow Component Docstring"),
html.Pre(dash_flow_example.ExampleFlowComponent.__doc__),
html.Hr(),
html.Label("React PropTypes Component Docstring"),
html.Pre(dash_flow_example.ExampleReactComponent.__doc__),
html.Div(id="waitfor"),
]
)
dash_duo.start_server(app)
dash_duo.wait_for_element("#waitfor")
dash_duo.percy_snapshot(name="flowtype")
def test_inin006_meta_tags(dash_duo):
metas = [
{"name": "description", "content": "my dash app"},
{"name": "custom", "content": "customized"},
]
app = Dash(meta_tags=metas)
app.layout = html.Div(id="content")
dash_duo.start_server(app)
meta = dash_duo.find_elements("meta")
# -3 for the meta charset, http-equiv and viewport.
assert len(meta) == len(metas) + 3, "Should have 3 extra meta tags"
for i in range(3, len(meta)):
meta_tag = meta[i]
meta_info = metas[i - 3]
assert meta_tag.get_attribute("name") == meta_info["name"]
assert meta_tag.get_attribute("content") == meta_info["content"]
def test_inin007_change_viewport_meta_tag(dash_duo):
"""
As of dash 2.5 the default viewport meta tag is:
[{"name": "viewport", "content": "width=device-width, initial-scale=1"}]
Test verifies that this feature can be disabled by using an empty viewport tag.
"""
app = Dash(meta_tags=[{"name": "viewport"}])
app.layout = html.Div(id="content")
dash_duo.start_server(app)
viewport_meta = dash_duo.find_elements('meta[name="viewport"]')
    assert len(viewport_meta) == 1, "Should have 1 viewport meta tag"
assert viewport_meta[0].get_attribute("content") == ""
def test_inin008_index_customization(dash_duo):
app = Dash()
app.index_string = """<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
<div id="custom-header">My custom header</div>
<div id="add"></div>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
{%renderer%}
</footer>
<div id="custom-footer">My custom footer</div>
<script>
// Test the formatting doesn't mess up script tags.
var elem = document.getElementById('add');
if (!elem) {
throw Error('could not find container to add');
}
elem.innerHTML = 'Got added';
var config = {};
fetch('/nonexist').then(r => r.json())
.then(r => config = r).catch(err => ({config}));
</script>
</body>
</html>"""
app.layout = html.Div("Dash app", id="app")
dash_duo.start_server(app)
assert dash_duo.find_element("#custom-header").text == "My custom header"
assert dash_duo.find_element("#custom-footer").text == "My custom footer"
assert dash_duo.find_element("#app").text == "Dash app"
assert dash_duo.wait_for_element("#add").text == "Got added"
assert dash_duo.get_logs() == []
def test_inin009_invalid_index_string(dash_duo):
app = Dash()
def will_raise():
app.index_string = """<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
<div id="custom-header">My custom header</div>
<div id="add"></div>
<footer>
</footer>
</body>
</html>"""
with pytest.raises(Exception) as err:
will_raise()
exc_msg = str(err.value)
assert "{%app_entry%}" in exc_msg
assert "{%config%}" in exc_msg
assert "{%scripts%}" in exc_msg
app.layout = html.Div("Hello World", id="a")
dash_duo.start_server(app)
assert dash_duo.find_element("#a").text == "Hello World"
def test_inin010_func_layout_accepted(dash_duo):
app = Dash()
def create_layout():
return html.Div("Hello World", id="a")
app.layout = create_layout
dash_duo.start_server(app)
assert dash_duo.find_element("#a").text == "Hello World"
def test_inin017_late_component_register(dash_duo):
app = Dash()
app.layout = html.Div(
[html.Button("Click me to put a dcc ", id="btn-insert"), html.Div(id="output")]
)
@app.callback(Output("output", "children"), [Input("btn-insert", "n_clicks")])
def update_output(value):
if value is None:
raise PreventUpdate
return dcc.Input(id="inserted-input")
dash_duo.start_server(app)
btn = dash_duo.find_element("#btn-insert")
btn.click()
dash_duo.find_element("#inserted-input")
def test_inin_024_port_env_success(dash_duo):
app = Dash(__name__)
app.layout = html.Div("hi", "out")
dash_duo.start_server(app, port="12345")
assert dash_duo.server_url == "http://localhost:12345"
dash_duo.wait_for_text_to_equal("#out", "hi")
def nested_app(server, path, text):
app = Dash(__name__, server=server, url_base_pathname=path)
app.layout = html.Div(id="out")
@app.callback(Output("out", "children"), [Input("out", "n_clicks")])
def out(n):
return text
return app
def test_inin025_url_base_pathname(dash_br, dash_thread_server):
server = flask.Flask(__name__)
app = nested_app(server, "/app1/", "The first")
nested_app(server, "/app2/", "The second")
dash_thread_server(app)
dash_br.server_url = "http://localhost:{}/app1/".format(dash_thread_server.port)
dash_br.wait_for_text_to_equal("#out", "The first")
dash_br.server_url = "http://localhost:{}/app2/".format(dash_thread_server.port)
dash_br.wait_for_text_to_equal("#out", "The second")
def test_inin026_graphs_in_tabs_do_not_share_state(dash_duo):
app = Dash(__name__, suppress_callback_exceptions=True)
app.layout = html.Div(
[
dcc.Tabs(
id="tabs",
children=[
dcc.Tab(label="Tab 1", value="tab1", id="tab1"),
dcc.Tab(label="Tab 2", value="tab2", id="tab2"),
],
value="tab1",
),
# Tab content
html.Div(id="tab_content"),
]
)
tab1_layout = [
html.Div(
[
dcc.Graph(
id="graph1",
figure={"data": [{"x": [1, 2, 3], "y": [5, 10, 6], "type": "bar"}]},
)
]
),
html.Pre(id="graph1_info"),
]
tab2_layout = [
html.Div(
[
dcc.Graph(
id="graph2",
figure={"data": [{"x": [4, 3, 2], "y": [5, 10, 6], "type": "bar"}]},
)
]
),
html.Pre(id="graph2_info"),
]
@app.callback(Output("graph1_info", "children"), Input("graph1", "clickData"))
def display_hover_data(hover_data):
return json.dumps(hover_data)
@app.callback(Output("graph2_info", "children"), Input("graph2", "clickData"))
def display_hover_data_2(hover_data):
return json.dumps(hover_data)
@app.callback(Output("tab_content", "children"), Input("tabs", "value"))
def render_content(tab):
return tab2_layout if tab == "tab2" else tab1_layout
dash_duo.start_server(app)
dash_duo.find_element("#graph1:not(.dash-graph--pending)").click()
until(lambda: '"label": 2' in dash_duo.find_element("#graph1_info").text, timeout=3)
dash_duo.find_element("#tab2").click()
dash_duo.find_element("#graph2:not(.dash-graph--pending)").click()
until(lambda: '"label": 3' in dash_duo.find_element("#graph2_info").text, timeout=3)
def test_inin027_multi_page_without_pages_folder(dash_duo):
app = Dash(__name__, pages_folder="")
# test for storing arbitrary keyword arguments: An `id` prop is defined for every page
# test for defining multiple pages within a single file: layout is passed directly to `register_page`
    # in the following two page registrations:
dash.register_page(
"multi_layout1",
layout=html.Div("text for multi_layout1", id="text_multi_layout1"),
path="/",
title="Supplied Title",
description="This is the supplied description",
name="Supplied name",
image="birds.jpeg",
id="multi_layout1",
)
dash.register_page(
"multi_layout2",
layout=html.Div("text for multi_layout2", id="text_multi_layout2"),
path="/layout2",
id="multi_layout2",
)
dash.register_page(
"not_found_404",
layout=html.Div("text for not_found_404", id="text_not_found_404"),
id="not_found_404",
)
app.layout = html.Div(
[
html.Div(
[
html.Div(
dcc.Link(
f"{page['name']} - {page['path']}",
id=page["id"],
href=page["path"],
)
)
for page in dash.page_registry.values()
]
),
dash.page_container,
]
)
dash_duo.start_server(app)
# test layout and title for each page in `page_registry` with link navigation
for page in dash.page_registry.values():
dash_duo.find_element("#" + page["id"]).click()
dash_duo.wait_for_text_to_equal("#text_" + page["id"], "text for " + page["id"])
assert dash_duo.driver.title == page["title"], "check that page title updates"
# test registration of not_found_404
assert "not_found_404" in dash.page_registry.keys(), "check custom not_found_404"
# clean up so this page doesn't affect other tests
del dash.page_registry["not_found_404"]
assert not dash_duo.get_logs()
| {
"content_hash": "fd6bbc10cfb4bcc016291da7260e8247",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 105,
"avg_line_length": 29.762237762237763,
"alnum_prop": 0.5495770676691729,
"repo_name": "plotly/dash",
"id": "ab88a45b8f3d9e259e964be9a45bbb3953575675",
"size": "12768",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/integration/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
} |
"""port_erspan_extension
Revision ID: 016a678fafd4
Revises: bda3c34581e0
Create Date: 2020-11-03 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = '016a678fafd4'
down_revision = 'bda3c34581e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'apic_aim_port_erspan_configurations',
sa.Column('port_id', sa.String(36), nullable=False),
sa.Column('dest_ip', sa.String(64), nullable=False),
sa.Column('flow_id', sa.Integer, nullable=False),
sa.Column('direction', sa.Enum('in', 'out', 'both'), nullable=False),
sa.ForeignKeyConstraint(
['port_id'], ['ports.id'],
name='apic_aim_port_erspan_extensions_fk_port',
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id', 'dest_ip', 'flow_id', 'direction'))
def downgrade():
pass
| {
"content_hash": "bffdb8dc44fe6f7ce9680641964f5c66",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 27.53125,
"alnum_prop": 0.6424517593643587,
"repo_name": "noironetworks/group-based-policy",
"id": "8c8c8a885805351cff9bdf0415e24489edf43a8c",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpservice/neutron/db/migration/alembic_migrations/versions/016a678fafd4_erspan_extension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1893"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3947895"
},
{
"name": "Shell",
"bytes": "31729"
}
],
"symlink_target": ""
} |
import os
from glob import glob
from django import template
from django.conf import settings
register = template.Library()
STATIC_ROOT = settings.STATIC_ROOT
CSS_ROOT = os.path.join(settings.STATIC_ROOT, 'css/')
LINK_TAG = '<link href="%s" rel="stylesheet" type="text/css">'
@register.simple_tag
def all_stylesheets():
return stylesheets('.')
@register.simple_tag
def stylesheets(directory):
links = []
root = os.path.join(CSS_ROOT, os.path.normpath(directory), '*.css')
for css_file in glob(root):
if not os.path.isfile(css_file): continue
ressource_url = '/%s' % os.path.relpath(css_file)
links.append(LINK_TAG % ressource_url)
return '\n'.join(links) | {
"content_hash": "bbf34a128080e31063a8004eb980de6d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 27.03846153846154,
"alnum_prop": 0.6842105263157895,
"repo_name": "JohnRandom/django-aggregator",
"id": "e6e733f7019ef9c7e6dd5b1e97c912382d3ede98",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dasdocc/aggregator/templatetags/import.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "600"
},
{
"name": "Python",
"bytes": "86029"
}
],
"symlink_target": ""
} |
import os
import sys
from config.db import MongoDB
from config.config import Production
from config.config import Staging
from config.config import Development
from config.config import Testing
try:
env = os.environ['FLASK_ENV']
except KeyError as e:
sys.exit('Please set the environment key FLASK_ENV to Production/Staging/Development/Testing')
class Environment(object):
def __init__(self):
global env
if env not in ('Production', 'Staging', 'Development', 'Testing'):
print('Invalid environment key, defaulting to Development')
env = 'Development'
if env == 'Production':
self.config = Production()
elif env == 'Staging':
self.config = Staging()
elif env == 'Testing':
self.config = Testing()
else:
self.config = Development()
self.mdb = MongoDB()
environment = Environment()
| {
"content_hash": "36216c352b2d5ac0d5f59a2a3619fa58",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 98,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.6368534482758621,
"repo_name": "wemoo/wemoo-center",
"id": "40bfabd306ae1e3fe67339abe0b205d671dd8bd4",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "41"
},
{
"name": "Python",
"bytes": "16625"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.providers.microsoft.azure.secrets.key_vault`."""
import warnings
from airflow.providers.microsoft.azure.secrets.key_vault import AzureKeyVaultBackend # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.key_vault`.",
DeprecationWarning,
stacklevel=2,
)
| {
"content_hash": "177504ac5f30f790014edc5c34a6bc6d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 103,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.7706666666666667,
"repo_name": "danielvdende/incubator-airflow",
"id": "000ae92b3ac28485db7df9441de3c76d833a95b4",
"size": "1163",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/contrib/secrets/azure_key_vault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
"""private module containing functions used for copying data
between instances based on join conditions.
"""
from . import attributes
from . import exc
from . import util as orm_util
from .. import util
def populate(
source,
source_mapper,
dest,
dest_mapper,
synchronize_pairs,
uowcommit,
flag_cascaded_pks,
):
source_dict = source.dict
dest_dict = dest.dict
for l, r in synchronize_pairs:
try:
# inline of source_mapper._get_state_attr_by_column
prop = source_mapper._columntoproperty[l]
value = source.manager[prop.key].impl.get(
source, source_dict, attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, dest_mapper, r, err)
try:
# inline of dest_mapper._set_state_attr_by_column
prop = dest_mapper._columntoproperty[r]
dest.manager[prop.key].impl.set(dest, dest_dict, value, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, source_mapper, l, dest_mapper, r, err)
# technically the "r.primary_key" check isn't
# needed here, but we check for this condition to limit
# how often this logic is invoked for memory/performance
# reasons, since we only need this info for a primary key
# destination.
if (
flag_cascaded_pks
and l.primary_key
and r.primary_key
and r.references(l)
):
uowcommit.attributes[("pk_cascaded", dest, r)] = True
def bulk_populate_inherit_keys(source_dict, source_mapper, synchronize_pairs):
# a simplified version of populate() used by bulk insert mode
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
value = source_dict[prop.key]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, source_mapper, r, err)
try:
prop = source_mapper._columntoproperty[r]
source_dict[prop.key] = value
        except exc.UnmappedColumnError as err:
            _raise_col_to_prop(True, source_mapper, l, source_mapper, r, err)
def clear(dest, dest_mapper, synchronize_pairs):
for l, r in synchronize_pairs:
if (
r.primary_key
and dest_mapper._get_state_attr_by_column(dest, dest.dict, r)
not in orm_util._none_set
):
raise AssertionError(
"Dependency rule tried to blank-out primary key "
"column '%s' on instance '%s'" % (r, orm_util.state_str(dest))
)
try:
dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(True, None, l, dest_mapper, r, err)
def update(source, source_mapper, dest, old_prefix, synchronize_pairs):
for l, r in synchronize_pairs:
try:
oldvalue = source_mapper._get_committed_attr_by_column(
source.obj(), l
)
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dest[r.key] = value
dest[old_prefix + r.key] = oldvalue
def populate_dict(source, source_mapper, dict_, synchronize_pairs):
for l, r in synchronize_pairs:
try:
value = source_mapper._get_state_attr_by_column(
source, source.dict, l, passive=attributes.PASSIVE_OFF
)
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
dict_[r.key] = value
def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
"""return true if the source object has changes from an old to a
new value on the given synchronize pairs
"""
for l, r in synchronize_pairs:
try:
prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError as err:
_raise_col_to_prop(False, source_mapper, l, None, r, err)
history = uowcommit.get_attribute_history(
source, prop.key, attributes.PASSIVE_NO_INITIALIZE
)
if bool(history.deleted):
return True
else:
return False
def _raise_col_to_prop(
isdest, source_mapper, source_column, dest_mapper, dest_column, err
):
if isdest:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"destination column '%s'; mapper '%s' does not map "
"this column. Try using an explicit `foreign_keys` "
"collection which does not include this column (or use "
"a viewonly=True relation)." % (dest_column, dest_mapper)
),
replace_context=err,
)
else:
util.raise_(
exc.UnmappedColumnError(
"Can't execute sync rule for "
"source column '%s'; mapper '%s' does not map this "
"column. Try using an explicit `foreign_keys` "
"collection which does not include destination column "
"'%s' (or use a viewonly=True relation)."
% (source_column, source_mapper, dest_column)
),
replace_context=err,
)
| {
"content_hash": "b4df5613d750f2619a2aa49784301812",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 78,
"avg_line_length": 34.94375,
"alnum_prop": 0.5868359864067251,
"repo_name": "cloudera/hue",
"id": "ceaf54e5d332e1fe3436cd81ce9d0b0013afa2f9",
"size": "5823",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/orm/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import numpy as np
import theano as theano
import theano.tensor as T
from theano.ifelse import ifelse
######################
# PARAM UPDATE FUNCS #
######################
def norm_clip(dW, max_l2_norm=10.0):
"""
Clip theano symbolic var dW to have some max l2 norm.
"""
dW_l2_norm = T.sqrt(T.sum(dW**2.0))
norm_ratio = (max_l2_norm / dW_l2_norm)
clip_factor = ifelse(T.lt(norm_ratio, 1.0), norm_ratio, 1.0)
dW_clipped = dW * clip_factor
return dW_clipped
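# Illustrative use of norm_clip (not part of the original file): clip a raw
# Theano gradient before it enters an update rule, e.g.
#     grad_W = norm_clip(T.grad(cost, W), max_l2_norm=5.0)
# where ``cost`` and ``W`` are assumed to be pre-existing Theano expressions.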
def get_param_updates(params=None, grads=None, \
alpha=None, beta1=None, beta2=None, it_count=None, \
mom2_init=1e-3, smoothing=1e-6, max_grad_norm=10000.0):
"""
This update has some extra inputs that aren't used. This is just so it
can be called interchangeably with "ADAM" updates.
"""
# make an OrderedDict to hold the updates
updates = OrderedDict()
# alpha is a shared array containing the desired learning rate
lr_t = alpha[0]
for p in params:
# get gradient for parameter p
grad_p = norm_clip(grads[p], max_grad_norm)
# initialize first-order momentum accumulator
mom1_ary = 0.0 * p.get_value(borrow=False)
mom1 = theano.shared(mom1_ary)
# update momentum accumulator
mom1_new = (beta1[0] * mom1) + ((1. - beta1[0]) * grad_p)
# do update
p_new = p - (lr_t * mom1_new)
        # apply the computed update to the parameter and store the new momentum
updates[p] = p_new
updates[mom1] = mom1_new
return updates | {
"content_hash": "b7e09aef586240b0261bf8ba1ff34f98",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 74,
"avg_line_length": 29.96153846153846,
"alnum_prop": 0.6007702182284981,
"repo_name": "Philip-Bachman/ICML-2015",
"id": "e419f31216f5e32410746dde3271706ad5f4ccdd",
"size": "1558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HelperFuncs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259068"
}
],
"symlink_target": ""
} |
from google.api_core.gapic_v1 import client_info as gapic_client_info
from google.api_core import client_info as http_client_info
import hive_to_bigquery
APPLICATION_NAME = "google-pso-tool/hive-bigquery"
USER_AGENT = "{}/{}".format(APPLICATION_NAME, hive_to_bigquery.__version__)
def get_gapic_client_info():
return gapic_client_info.ClientInfo(user_agent=USER_AGENT)
def get_http_client_info():
return http_client_info.ClientInfo(user_agent=USER_AGENT)
| {
"content_hash": "b625dd2648f4135336f55710bbd3aec7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 27.705882352941178,
"alnum_prop": 0.7537154989384289,
"repo_name": "CloudVLab/professional-services",
"id": "df69a229742ec9cae3d7236cdc024c52efcba43b",
"size": "1047",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/hive-bigquery/hive_to_bigquery/client_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12706"
},
{
"name": "Dockerfile",
"bytes": "6279"
},
{
"name": "Go",
"bytes": "28241"
},
{
"name": "HCL",
"bytes": "23513"
},
{
"name": "HTML",
"bytes": "1228123"
},
{
"name": "Java",
"bytes": "87077"
},
{
"name": "JavaScript",
"bytes": "19579"
},
{
"name": "Makefile",
"bytes": "5250"
},
{
"name": "Python",
"bytes": "1327492"
},
{
"name": "Scala",
"bytes": "298157"
},
{
"name": "Shell",
"bytes": "68560"
},
{
"name": "TSQL",
"bytes": "17166"
},
{
"name": "TypeScript",
"bytes": "137719"
}
],
"symlink_target": ""
} |
from aiojson.backends.python import Buffer, get_tokens
from .data import RAW_DATA, RAW_TOKENS
def test_get_tokens_all():
buf = Buffer(RAW_DATA)
parser = get_tokens(buf, more_data=False)
tokens = list(parser)
assert len(tokens) == len(RAW_TOKENS)
assert tokens == RAW_TOKENS
def test_get_tokens_chunks_split_in_spaces():
chunk1 = RAW_DATA[:30]
chunk2 = RAW_DATA[30:]
validate_get_tokens_reentrant(Buffer(chunk1), Buffer(chunk2))
def test_get_tokens_chunks_split_in_string():
chunk1 = RAW_DATA[:35]
chunk2 = RAW_DATA[35:]
validate_get_tokens_reentrant(Buffer(chunk1), Buffer(chunk2))
def test_get_tokens_chunks_split_in_number():
chunk1 = RAW_DATA[:42]
chunk2 = RAW_DATA[42:]
validate_get_tokens_reentrant(Buffer(chunk1), Buffer(chunk2))
def validate_get_tokens_reentrant(*buffers):
tokens = []
buffer = Buffer('')
for b in buffers:
buffer = buffer + b
tokens += list(get_tokens(buffer))
tokens += list(get_tokens(buffer, False))
unfinished = buffer.search()
assert not unfinished
assert len(tokens) == len(RAW_TOKENS)
assert tokens == RAW_TOKENS
| {
"content_hash": "3a0083e463d5e505c75c28a564fcce7a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 65,
"avg_line_length": 27.61904761904762,
"alnum_prop": 0.6681034482758621,
"repo_name": "ethanfrey/aiojson",
"id": "583ff861c35380da14f6f2c3518412621b202aff",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiojson/backends/tests/test_parse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33294"
}
],
"symlink_target": ""
} |
from clean_topology import cleanup
from create_topology import create_topo
print "\n -- "
cleanup()
create_topo('partial-topology.json')
print "\n -- "
| {
"content_hash": "d17b25eab61b75417949d7fa02f7618f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 19.25,
"alnum_prop": 0.7272727272727273,
"repo_name": "nikitamarchenko/open-kilda",
"id": "63bfd1fdecddcd0e0c544de07722ba9d7288b608",
"size": "776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "services/topology-engine/queue-engine/tests/smoke-tests/create-partial-topology.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43545"
},
{
"name": "Gherkin",
"bytes": "57157"
},
{
"name": "Groovy",
"bytes": "243"
},
{
"name": "HTML",
"bytes": "61822"
},
{
"name": "Java",
"bytes": "2674140"
},
{
"name": "JavaScript",
"bytes": "105371"
},
{
"name": "Makefile",
"bytes": "22000"
},
{
"name": "Python",
"bytes": "359953"
},
{
"name": "Ruby",
"bytes": "1185"
},
{
"name": "Shell",
"bytes": "77403"
}
],
"symlink_target": ""
} |
import sys
import traceback
from core.game import *
from core.functions import *
# Coords format - (y, x)
def main():
try:
game_init();
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info();
tb = traceback.format_exception(exc_type, exc_value, exc_traceback);
string = ''.join(tb);
w_log("last.log", "w", "game_init\n" + string + "\n\n");
w_log("logs.log", "a+", "{}\n\n".format(string));
if (__name__ == "__main__"):
main(); | {
"content_hash": "bf2753c6a100d4500c91820d8ff7583c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 21.227272727272727,
"alnum_prop": 0.6124197002141327,
"repo_name": "Melnick/Cnake",
"id": "9be388ead5a740ba17e822153a3b5019fb231b43",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51513"
},
{
"name": "Python",
"bytes": "14721"
}
],
"symlink_target": ""
} |
import os
def run(*args):
print('Script called from: %s' % os.getcwd())
| {
"content_hash": "04c11fd5ebc36f0839d51d75f4d5d037",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 15.6,
"alnum_prop": 0.6153846153846154,
"repo_name": "django-extensions/django-extensions",
"id": "cc79d3a885946ef85002b89cf19b84feeefe55f3",
"size": "102",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/testapp_with_no_models_file/scripts/other_directory_checker_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "740"
},
{
"name": "HTML",
"bytes": "2126"
},
{
"name": "JavaScript",
"bytes": "41410"
},
{
"name": "Makefile",
"bytes": "1257"
},
{
"name": "Python",
"bytes": "826197"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import contextlib
from kombu.transport import virtual
from kombu import utils
from stomp import exception as exc
from . import stomp
class Message(virtual.Message):
"""Kombu virtual transport message class for kombu-stomp.
This class extends :py:class:`kombu.transport.virtual.Message`, so it
keeps STOMP message ID for later use.
"""
def __init__(self, channel, raw_message):
# we'll get a message ID only for incoming messages
if isinstance(raw_message, tuple):
raw_message, msg_id = raw_message
self.msg_id = msg_id
else:
self.msg_id = None
super(Message, self).__init__(channel, raw_message)
class QoS(virtual.QoS):
"""Kombu quality of service class for ``kombu-stomp``."""
def __init__(self, *args, **kwargs):
self.ids = {}
super(QoS, self).__init__(*args, **kwargs)
def append(self, message, delivery_tag):
self.ids[delivery_tag] = message.msg_id
super(QoS, self).append(message, delivery_tag)
def ack(self, delivery_tag):
self._stomp_ack(delivery_tag)
return super(QoS, self).ack(delivery_tag)
def _stomp_ack(self, delivery_tag):
msg_id = self.ids.pop(delivery_tag, None)
if msg_id:
with self.channel.conn_or_acquire() as conn:
conn.ack(msg_id)
class Channel(virtual.Channel):
"""``kombu-stomp`` channel class."""
QoS = QoS
Message = Message
def __init__(self, *args, **kwargs):
super(Channel, self).__init__(*args, **kwargs)
self._stomp_conn = None
self._subscriptions = set()
def _get_many(self, queue, timeout=None):
"""Get next messesage from current active queues.
Note that we are ignoring any timeout due to performance
issues.
"""
with self.conn_or_acquire() as conn:
for q in queue:
self.subscribe(conn, q)
# FIXME(rafaduran): inappropriate intimacy code smell
return next(conn.message_listener.iterator())
def _put(self, queue, message, **kwargs):
with self.conn_or_acquire() as conn:
body = message.pop('body')
conn.send(self.queue_destination(queue), body, **message)
def basic_consume(self, queue, *args, **kwargs):
with self.conn_or_acquire() as conn:
self.subscribe(conn, queue)
return super(Channel, self).basic_consume(queue, *args, **kwargs)
def subscribe(self, conn, queue):
if queue in self._subscriptions:
return
self._subscriptions.add(queue)
return conn.subscribe(self.queue_destination(queue),
ack='client-individual')
def queue_unbind(self,
queue,
exchange=None,
routing_key='',
arguments=None,
**kwargs):
super(Channel, self).queue_unbind(queue,
exchange,
routing_key,
arguments,
**kwargs)
with self.conn_or_acquire() as conn:
conn.unsubscribe(self.queue_destination(queue))
self._subscriptions.discard(queue)
def queue_destination(self, queue):
return '/queue/{prefix}{name}'.format(prefix=self.prefix,
name=queue)
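    # Illustrative example (not part of the original class): with
    # transport_options = {'queue_name_prefix': 'myapp.'}, calling
    # queue_destination('celery') returns '/queue/myapp.celery'.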
@contextlib.contextmanager
def conn_or_acquire(self, disconnect=False):
"""Use current connection or create a new one."""
if not self.stomp_conn.is_connected():
self.stomp_conn.start()
self.stomp_conn.connect(**self._get_conn_params())
yield self.stomp_conn
if disconnect:
self.stomp_conn.disconnect()
self.iterator = None
@property
def stomp_conn(self):
"""Property over the stomp.py connection object.
It will create the connection object at first use.
"""
if not self._stomp_conn:
self._stomp_conn = stomp.Connection(self.prefix,
**self._get_params())
return self._stomp_conn
@property
def transport_options(self):
return self.connection.client.transport_options
@utils.cached_property
def prefix(self):
return self.transport_options.get('queue_name_prefix', '')
def _get_params(self):
return {
'host_and_ports': [
(self.connection.client.hostname or '127.0.0.1',
self.connection.client.port or 61613)
],
'reconnect_attempts_max': 1,
}
def _get_conn_params(self):
return {
'username': self.connection.client.userid,
'passcode': self.connection.client.password,
'wait': True,
}
def close(self):
super(Channel, self).close()
try:
# TODO (rafaduran): do we need unsubscribe all queues first?
self.stomp_conn.disconnect()
except exc.NotConnectedException:
pass
class Transport(virtual.Transport):
"""Transport class for ``kombu-stomp``."""
Channel = Channel
| {
"content_hash": "ac2dce573a8b0fe081a052a2fadb0482",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 73,
"avg_line_length": 31.444444444444443,
"alnum_prop": 0.5609075692765483,
"repo_name": "ntteurope/kombu-stomp",
"id": "0735623de879ca3ecb9efda9766b7ca1bd76a47f",
"size": "5377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kombu_stomp/transport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24095"
}
],
"symlink_target": ""
} |
"""
MySQL database backend for Django.
Requires mysqlclient: https://pypi.org/project/mysqlclient/
"""
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
try:
import MySQLdb as Database
except ImportError as err:
raise ImproperlyConfigured(
'Error loading MySQLdb module.\n'
'Did you install mysqlclient?'
) from err
from MySQLdb.constants import CLIENT, FIELD_TYPE
from MySQLdb.converters import conversions
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient
from .creation import DatabaseCreation
from .features import DatabaseFeatures
from .introspection import DatabaseIntrospection
from .operations import DatabaseOperations
from .schema import DatabaseSchemaEditor
from .validation import DatabaseValidation
version = Database.version_info
if version < (1, 4, 0):
raise ImproperlyConfigured('mysqlclient 1.4.0 or newer is required; you have %s.' % Database.__version__)
# MySQLdb returns TIME columns as timedelta -- they are more like timedelta in
# terms of actual behavior as they are signed and include days -- and Django
# expects time.
django_conversions = {
**conversions,
**{FIELD_TYPE.TIME: backend_utils.typecast_time},
}
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same).
server_version_re = _lazy_re_compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class CursorWrapper:
"""
A thin wrapper around MySQLdb's normal cursor class that catches particular
exception instances and reraises them with the correct types.
Implemented as a wrapper, rather than a subclass, so that it isn't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (
1048, # Column cannot be null
1690, # BIGINT UNSIGNED value is out of range
3819, # CHECK constraint is violated
4025, # CHECK constraint failed
)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
raise IntegrityError(*tuple(e.args))
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
raise IntegrityError(*tuple(e.args))
raise
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BigAutoField': 'bigint AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime(6)',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'JSONField': 'json',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveBigIntegerField': 'bigint UNSIGNED',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallAutoField': 'smallint AUTO_INCREMENT',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time(6)',
'UUIDField': 'char(32)',
}
# For these data types:
# - MySQL < 8.0.13 and MariaDB < 10.2.1 don't accept default values and
# implicitly treat them as nullable
# - all versions of MySQL and MariaDB don't support full width database
# indexes
_limited_data_types = (
'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text',
'mediumtext', 'longtext', 'json',
)
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
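    # Rough illustration (not part of the original module) of how the two maps
    # above combine for expression-based lookups: the ORM substitutes
    # pattern_esc into the chosen pattern_ops template and then formats the
    # result with the right-hand-side SQL, so a "contains" lookup ends up
    # roughly as LIKE BINARY CONCAT('%', <escaped rhs>, '%').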
isolation_levels = {
'read uncommitted',
'read committed',
'repeatable read',
'serializable',
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['database'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['password'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
# Validate the transaction isolation level, if specified.
options = settings_dict['OPTIONS'].copy()
isolation_level = options.pop('isolation_level', 'read committed')
if isolation_level:
isolation_level = isolation_level.lower()
if isolation_level not in self.isolation_levels:
raise ImproperlyConfigured(
"Invalid transaction isolation level '%s' specified.\n"
"Use one of %s, or None." % (
isolation_level,
', '.join("'%s'" % s for s in sorted(self.isolation_levels))
))
self.isolation_level = isolation_level
kwargs.update(options)
return kwargs
@async_unsafe
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# bytes encoder in mysqlclient doesn't work and was added only to
# prevent KeyErrors in Django < 2.0. We can remove this workaround when
# mysqlclient 2.1 becomes the minimal mysqlclient supported by Django.
# See https://github.com/PyMySQL/mysqlclient/issues/489
if connection.encoders.get(bytes) is bytes:
connection.encoders.pop(bytes)
return connection
def init_connection_state(self):
assignments = []
if self.features.is_sql_auto_is_null_enabled:
# SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on
# a recently inserted row will return when the field is tested
# for NULL. Disabling this brings this aspect of MySQL in line
# with SQL standards.
assignments.append('SET SQL_AUTO_IS_NULL = 0')
if self.isolation_level:
assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper())
if assignments:
with self.cursor() as cursor:
cursor.execute('; '.join(assignments))
@async_unsafe
def create_cursor(self, name=None):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
"""
with self.cursor() as cursor:
cursor.execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
with self.cursor() as cursor:
cursor.execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
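    # Illustrative usage sketch of the two methods above (not part of the
    # original module; mirrors how fixture loading drives them):
    #     connection.disable_constraint_checking()
    #     try:
    #         ...insert rows containing forward references...
    #         connection.check_constraints(table_names=['app_child'])  # hypothetical table
    #     finally:
    #         connection.enable_constraint_checking()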
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
""" % (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise IntegrityError(
"The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not "
"have a corresponding value in %s.%s."
% (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def display_name(self):
return 'MariaDB' if self.mysql_is_mariadb else 'MySQL'
@cached_property
def data_type_check_constraints(self):
if self.features.supports_column_check_constraints:
check_constraints = {
'PositiveBigIntegerField': '`%(column)s` >= 0',
'PositiveIntegerField': '`%(column)s` >= 0',
'PositiveSmallIntegerField': '`%(column)s` >= 0',
}
if self.mysql_is_mariadb and self.mysql_version < (10, 4, 3):
# MariaDB < 10.4.3 doesn't automatically use the JSON_VALID as
# a check constraint.
check_constraints['JSONField'] = 'JSON_VALID(`%(column)s`)'
return check_constraints
return {}
@cached_property
def mysql_server_data(self):
with self.temporary_connection() as cursor:
# Select some server variables and test if the time zone
# definitions are installed. CONVERT_TZ returns NULL if 'UTC'
# timezone isn't loaded into the mysql.time_zone table.
cursor.execute("""
SELECT VERSION(),
@@sql_mode,
@@default_storage_engine,
@@sql_auto_is_null,
@@lower_case_table_names,
CONVERT_TZ('2001-01-01 01:00:00', 'UTC', 'UTC') IS NOT NULL
""")
row = cursor.fetchone()
return {
'version': row[0],
'sql_mode': row[1],
'default_storage_engine': row[2],
'sql_auto_is_null': bool(row[3]),
'lower_case_table_names': bool(row[4]),
'has_zoneinfo_database': bool(row[5]),
}
@cached_property
def mysql_server_info(self):
return self.mysql_server_data['version']
@cached_property
def mysql_version(self):
match = server_version_re.match(self.mysql_server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % self.mysql_server_info)
return tuple(int(x) for x in match.groups())
@cached_property
def mysql_is_mariadb(self):
return 'mariadb' in self.mysql_server_info.lower()
@cached_property
def sql_mode(self):
sql_mode = self.mysql_server_data['sql_mode']
return set(sql_mode.split(',') if sql_mode else ())
| {
"content_hash": "607e2543ea61af93d18c6f07fde26e90",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 112,
"avg_line_length": 39.92118226600985,
"alnum_prop": 0.59464461994077,
"repo_name": "koordinates/django",
"id": "a8dcc7c72a9d4a9d24fc32e884c83421961cdac2",
"size": "16208",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable/3.2.x-kx",
"path": "django/db/backends/mysql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84917"
},
{
"name": "HTML",
"bytes": "223820"
},
{
"name": "JavaScript",
"bytes": "139791"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "14472067"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""Including this as a dependency will result in tests NOT using MLIR bridge.
This function is defined by default in test_util.py to None. The test_util then
attempts to import this module. If this file is made available through the BUILD
rule, then this function is overridden and will instead cause Tensorflow graphs
to be always NOT be compiled with MLIR bridge.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def is_mlir_bridge_enabled():
"""Returns false if the MLIR bridge should be not be enabled for tests."""
return False
| {
"content_hash": "5e985fe2920cd11f727d99621266c63b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 37.8125,
"alnum_prop": 0.768595041322314,
"repo_name": "sarvex/tensorflow",
"id": "d2581a945017fda6bcbce83b273821f1750e4034",
"size": "1294",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tensorflow/python/framework/is_mlir_bridge_test_false.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import jsonschema
import mock
import six
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2actions.notifier import notifier
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as action_constants
from st2common.models.api import notification as notify_api_models
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as action_service
from st2common.services import workflows as workflow_service
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.fixtures.packs.core.fixture import PACK_PATH as CORE_PACK_PATH
from st2tests.fixtures.packs.orquesta_tests.fixture import PACK_PATH as TEST_PACK_PATH
from st2tests.mocks import execution as mock_ac_ex_xport
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
PACKS = [TEST_PACK_PATH, CORE_PACK_PATH]
MOCK_NOTIFY = {
"on-complete": {
"data": {"source_channel": "baloney", "user": "lakstorm"},
"routes": ["hubot"],
}
}
@mock.patch.object(
notifier.Notifier, "_post_notify_triggers", mock.MagicMock(return_value=None)
)
@mock.patch.object(
notifier.Notifier, "_post_generic_trigger", mock.MagicMock(return_value=None)
)
@mock.patch.object(
publishers.CUDPublisher,
"publish_update",
mock.MagicMock(side_effect=mock_ac_ex_xport.MockExecutionPublisher.publish_update),
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_create",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_state",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
"publish_create",
mock.MagicMock(
side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create
),
)
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
"publish_state",
mock.MagicMock(
side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state
),
)
class OrquestaNotifyTest(st2tests.ExecutionDbTestCase):
@classmethod
def setUpClass(cls):
super(OrquestaNotifyTest, cls).setUpClass()
# Register runners.
runnersregistrar.register_runners()
# Register test pack(s).
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False, fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
def test_no_notify(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check that notify is setup correctly in the db record.
self.assertDictEqual(wf_ex_db.notify, {})
def test_no_notify_task_list(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check that notify is setup correctly in the db record.
expected_notify = {"config": MOCK_NOTIFY, "tasks": []}
self.assertDictEqual(wf_ex_db.notify, expected_notify)
def test_custom_notify_task_list(self):
wf_input = {"notify": ["task1"]}
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters=wf_input
)
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check that notify is setup correctly in the db record.
expected_notify = {"config": MOCK_NOTIFY, "tasks": wf_input["notify"]}
self.assertDictEqual(wf_ex_db.notify, expected_notify)
def test_default_notify_task_list(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "notify.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check that notify is setup correctly in the db record.
expected_notify = {"config": MOCK_NOTIFY, "tasks": ["task1", "task2", "task3"]}
self.assertDictEqual(wf_ex_db.notify, expected_notify)
def test_notify_task_list_bad_item_value(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
expected_schema_failure_test_cases = [
"task1", # Notify must be type of list.
[123], # Item has to be type of string.
[""], # String value cannot be empty.
[" "], # String value cannot be just spaces.
[" "], # String value cannot be just tabs.
["init task"], # String value cannot have space.
["init-task"], # String value cannot have dash.
["task1", "task1"], # String values have to be unique.
]
for notify_tasks in expected_schema_failure_test_cases:
lv_ac_db.parameters = {"notify": notify_tasks}
try:
self.assertRaises(
jsonschema.ValidationError, action_service.request, lv_ac_db
)
except Exception as e:
raise AssertionError("%s: %s" % (six.text_type(e), notify_tasks))
def test_notify_task_list_nonexistent_task(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db.parameters = {"notify": ["init_task"]}
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
expected_result = {
"output": None,
"errors": [
{
"message": (
"The following tasks in the notify parameter do not "
"exist in the workflow definition: init_task."
)
}
],
}
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
def test_notify_task_list_item_value(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
expected_schema_success_test_cases = [[], ["task1"], ["task1", "task2"]]
for notify_tasks in expected_schema_success_test_cases:
lv_ac_db.parameters = {"notify": notify_tasks}
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(
lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING
)
def test_cascade_notify_to_tasks(self):
wf_input = {"notify": ["task2"]}
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters=wf_input
)
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Assert task1 notify is not set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertIsNone(tk1_lv_ac_db.notify)
self.assertEqual(
tk1_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED
)
self.assertFalse(notifier.Notifier._post_notify_triggers.called)
notifier.Notifier._post_notify_triggers.reset_mock()
# Handle task1 completion.
workflow_service.handle_action_execution_completion(tk1_ac_ex_db)
tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
self.assertEqual(tk1_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Assert task2 notify is set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task2"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
notify = notify_api_models.NotificationsHelper.from_model(
notify_model=tk2_lv_ac_db.notify
)
self.assertEqual(notify, MOCK_NOTIFY)
self.assertEqual(
tk2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED
)
self.assertTrue(notifier.Notifier._post_notify_triggers.called)
notifier.Notifier._post_notify_triggers.reset_mock()
# Handle task2 completion.
workflow_service.handle_action_execution_completion(tk2_ac_ex_db)
tk2_ex_db = wf_db_access.TaskExecution.get_by_id(tk2_ex_db.id)
self.assertEqual(tk2_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Assert task3 notify is not set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task3"}
tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk3_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk3_ex_db.id)
)[0]
tk3_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk3_ac_ex_db.liveaction["id"])
self.assertIsNone(tk3_lv_ac_db.notify)
self.assertEqual(
tk3_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED
)
self.assertFalse(notifier.Notifier._post_notify_triggers.called)
notifier.Notifier._post_notify_triggers.reset_mock()
# Handle task3 completion.
workflow_service.handle_action_execution_completion(tk3_ac_ex_db)
tk3_ex_db = wf_db_access.TaskExecution.get_by_id(tk3_ex_db.id)
self.assertEqual(tk3_ex_db.status, wf_statuses.SUCCEEDED)
# Assert workflow is completed.
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue(notifier.Notifier._post_notify_triggers.called)
notifier.Notifier._post_notify_triggers.reset_mock()
def test_notify_task_list_for_task_with_notify(self):
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "subworkflow-with-notify-task.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters={"notify": ["task2"]}
)
lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Assert task1 notify is not set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertIsNone(tk1_lv_ac_db.notify)
# Assert task2 notify is set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task2"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
notify = notify_api_models.NotificationsHelper.from_model(
notify_model=tk2_lv_ac_db.notify
)
self.assertEqual(notify, MOCK_NOTIFY)
def test_no_notify_for_task_with_notify(self):
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "subworkflow-with-notify-task.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Assert task1 notify is not set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertIsNone(tk1_lv_ac_db.notify)
# Assert task2 notify is not set.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task2"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
self.assertIsNone(tk2_lv_ac_db.notify)
| {
"content_hash": "c9468558c1b20bef0e062197034650a1",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 88,
"avg_line_length": 45.0725,
"alnum_prop": 0.6533362915303123,
"repo_name": "Plexxi/st2",
"id": "ff7114a31867ced9dd2e26ce46943de5220ca285",
"size": "18657",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/runners/orquesta_runner/tests/unit/test_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
import unittest
from vodem.api import sim_imsi
class TestSimImsi(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.valid_response = {
'sim_imsi': '',
}
def test_call(self):
resp = sim_imsi()
self.assertEqual(self.valid_response, resp)
| {
"content_hash": "222be0a22cd3783dcca4679a6ba14a96",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 19,
"alnum_prop": 0.6019736842105263,
"repo_name": "alzeih/python-vodem-vodafone-K4607-Z",
"id": "890bf32b2ab461664ce8002f15d1d06332579d8c",
"size": "304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/api/test_sim_imsi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19346"
},
{
"name": "JavaScript",
"bytes": "444689"
},
{
"name": "Python",
"bytes": "84811"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
from itertools import repeat
import validator.testcases.packagelayout as packagelayout
from validator.errorbundler import ErrorBundle
from helper import _do_test, MockXPI
def test_blacklisted_files():
"""Tests that the validator will throw warnings on extensions
containing files that have extensions which are not considered
safe."""
err = _do_test("tests/resources/packagelayout/ext_blacklist.xpi",
packagelayout.test_blacklisted_files,
True)
assert err.metadata["contains_binary_extension"]
assert not any(count for (key, count) in err.compat_summary.items())
# Run the compatibility test on this, but it shouldn't fail or produce
    # errors because the binary content isn't in the appropriate directories.
err = _do_test("tests/resources/packagelayout/ext_blacklist.xpi",
packagelayout.test_compatibility_binary,
False)
print err.compat_summary
assert not err.compat_summary["errors"]
def test_java_jar_detection():
"""
Test that Java archives are flagged as such so that they do not generate
hundreds or thousands of errors.
"""
classes = ("c%d.class" % i for i in xrange(1000))
mock_xpi = MockXPI(dict(zip(classes, repeat(""))))
err = ErrorBundle(None, True)
packagelayout.test_blacklisted_files(err, mock_xpi)
assert not err.failed()
assert err.notices
def test_blacklisted_magic_numbers():
"Tests that blacklisted magic numbers are banned"
err = _do_test("tests/resources/packagelayout/magic_number.xpi",
packagelayout.test_blacklisted_files,
True)
assert err.metadata["contains_binary_content"]
# Same logic as above.
err = _do_test("tests/resources/packagelayout/magic_number.xpi",
packagelayout.test_compatibility_binary,
False)
print err.compat_summary
assert not err.compat_summary["errors"]
assert "binary_components" not in err.metadata
def test_compat_binary_extensions():
"""
Test that the validator will throw compatibility errors for files that
would otherwise require the add-on to be manually updated.
"""
# This time when the compatibility checks are run, they should fire off
# compatibility errors because the files are the /components/ directory
# of the package.
err = _do_test("tests/resources/packagelayout/ext_blacklist_compat.xpi",
packagelayout.test_compatibility_binary,
False)
print err.compat_summary
assert err.compat_summary["errors"]
assert err.metadata["binary_components"]
def test_godlikea():
"""Test that packages with a godlikea chrome namespaceget rejected."""
err = ErrorBundle()
xpi = MockXPI({"chrome/godlikea.jar": True})
packagelayout.test_godlikea(err, xpi)
assert err.failed()
assert err.errors
# These functions will test the code with manually constructed packages
# that contain valid or failing versions of the specified package. The
# remaining tests will simply emulate this behaviour (since it was
# successfully tested with these functions).
def test_theme_passing():
"Tests the layout of a proper theme"
_do_test("tests/resources/packagelayout/theme.jar",
packagelayout.test_theme_layout,
False)
def test_extra_unimportant():
"""Tests the layout of a theme that contains an unimportant but
extra directory."""
_do_test("tests/resources/packagelayout/theme_extra_unimportant.jar",
packagelayout.test_theme_layout,
False)
def _do_simulated_test(function, structure, failure=False, ff4=False):
""""Performs a test on a function or set of functions without
generating a full package."""
dict_structure = {"__MACOSX/foo.bar": True}
for item in structure:
dict_structure[item] = True
err = ErrorBundle()
err.save_resource("ff4", ff4)
function(err, structure)
err.print_summary(True)
if failure:
assert err.failed()
else:
assert not err.failed()
return err
def test_langpack_max():
"""Tests the package layout module out on a simulated language pack
containing the largest number of possible elements."""
_do_simulated_test(packagelayout.test_langpack_layout,
["install.rdf",
"chrome/foo.jar",
"chrome.manifest",
"chrome/bar.test.jar",
"foo.manifest",
"bar.rdf",
"abc.dtd",
"def.jar",
"chrome/asdf.properties",
"chrome/asdf.xhtml",
"chrome/asdf.css"])
def test_langpack_sans_jars():
"""
Test that language packs don't require JAR files to be present in the
chrome/ directory.
"""
_do_simulated_test(
packagelayout.test_langpack_layout,
["install.rdf", "chrome.manifest", # Required files
"foo.manifest", "bar.rdf", "abc.dtd", "def.jar", # Allowed files
"chrome/foo.properties", "chrome/foo.xhtml", "chrome/foo.css"])
def test_dict_max():
"""Tests the package layout module out on a simulated dictionary
containing the largest number of possible elements."""
_do_simulated_test(packagelayout.test_dictionary_layout,
["install.rdf",
"dictionaries/foo.aff",
"dictionaries/bar.test.dic",
"install.js",
"dictionaries/foo.aff",
"dictionaries/bar.test.dic",
"chrome.manifest",
"chrome/whatever.jar"])
def test_unknown_file():
"""Tests that the unknown file detection function is working."""
# We test against langpack because it is incredibly strict in its
# file format.
_do_simulated_test(packagelayout.test_langpack_layout,
["install.rdf",
"chrome/foo.jar",
"chrome.manifest",
"chromelist.txt"])
def test_disallowed_file():
"""Tests that outright improper files are blocked."""
# We test against langpack because it is incredibly strict in its
# file format.
_do_simulated_test(packagelayout.test_langpack_layout,
["install.rdf",
"chrome/foo.jar",
"chrome.manifest",
"foo.bar"],
True)
def test_extra_obsolete():
"""Tests that unnecessary, obsolete files are detected."""
err = ErrorBundle()
# Tests that chromelist.txt is treated (with and without slashes in
# the path) as an obsolete file.
assert not packagelayout.test_unknown_file(err, "x//whatever.txt")
assert not packagelayout.test_unknown_file(err, "whatever.txt")
assert packagelayout.test_unknown_file(err, "x//chromelist.txt")
assert packagelayout.test_unknown_file(err, "chromelist.txt")
assert not err.failed()
def test_has_installrdfs():
"""Tests that install.rdf files are present and that subpackage
rules are respected."""
# Test package to make sure has_install_rdf is set to True.
assert not _do_installrdfs(packagelayout.test_layout_all)
assert _do_installrdfs(packagelayout.test_layout_all, False)
mock_xpi_subpack = MockXPI({}, subpackage=True)
# Makes sure the above test is ignored if the package is a
# subpackage.
assert not _do_installrdfs(packagelayout.test_layout_all,
False,
mock_xpi_subpack)
assert not _do_installrdfs(packagelayout.test_layout_all,
True,
mock_xpi_subpack)
class MockDupeZipFile(object):
"""Mock a ZipFile class, simulating duplicate filename entries."""
def namelist(self):
return ["foo.bar", "foo.bar"]
class MockDupeXPI(object):
"""Mock the XPIManager class, simulating duplicate filename entries."""
def __init__(self):
self.zf = MockDupeZipFile()
self.subpackage = False
def test_duplicate_files():
"""Test that duplicate files in a package are caught."""
err = ErrorBundle()
err.save_resource("has_install_rdf", True)
packagelayout.test_layout_all(err, MockDupeXPI())
assert err.failed()
def _do_installrdfs(function, has_install_rdf=True, xpi=None):
"Helps to test that install.rdf files are present"
err = ErrorBundle()
if has_install_rdf:
content = {"install.rdf" : True}
err.save_resource("has_install_rdf", True)
else:
content = {}
if xpi is None:
xpi = MockXPI(content)
function(err, xpi)
return err.failed()
| {
"content_hash": "b0d5970941cae03da76b04ecd5c3a74d",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 78,
"avg_line_length": 32.26258992805755,
"alnum_prop": 0.6227004125320549,
"repo_name": "mattbasta/amo-validator",
"id": "8fba1a42a9973024a80a4647623e4182fe83da54",
"size": "8969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_packagelayout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "466"
},
{
"name": "JavaScript",
"bytes": "272163"
},
{
"name": "Python",
"bytes": "846972"
},
{
"name": "Shell",
"bytes": "1619"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voucher', '0002_auto_20170418_2132'),
]
operations = [
migrations.AlterField(
model_name='voucher',
name='offers',
field=models.ManyToManyField(limit_choices_to={'offer_type': 'Voucher'}, related_name='vouchers', to='offer.ConditionalOffer', verbose_name='Offers'),
),
]
| {
"content_hash": "1c8c35c26ba1a6f1a48633d136e25137",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 162,
"avg_line_length": 28.125,
"alnum_prop": 0.6155555555555555,
"repo_name": "sasha0/django-oscar",
"id": "55bb676506480b0be6dbbcb512dbc39ec4250291",
"size": "497",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/oscar/apps/voucher/migrations/0003_auto_20171212_0411.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387941"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "518624"
},
{
"name": "JavaScript",
"bytes": "344864"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "1957797"
},
{
"name": "Shell",
"bytes": "1643"
}
],
"symlink_target": ""
} |
import urllib2
from urllib import urlencode
class DeCaptcher(object):
"""
Unofficial python client for de-captcher.com API
"""
def __init__(self, username, password):
self.url = "http://poster.de-captcher.com/"
self.username = username
self.password = password
def check_credentials(self):
"""
Checks out supplied credentials are valid or not?
:return:
"""
data = {"function": "balance",
"username": self.username,
"password": self.password}
response = self.__api(data)
return False if response == "" else True
def get_balance(self):
"""
Get current balance
:return:
"""
data = {"function": "balance",
"username": self.username,
"password": self.password}
response = self.__api(data)
return response
def solve_image(self, p):
"""
Send image as binary format and get text.
:param p:
:return:
"""
data = {"function": "picture2",
"username": self.username,
"password": self.password,
"pict_type": "0",
"pict_to": "0",
"pict": open(p, "rb").read()}
response = self.__api(data)
answer = response.split("|")[-1]
return answer
def __api(self, data):
"""
Simple HTTP Post function with build-in functions
in order to serve this tool without dependencies like `requests` etc.
:param data:
:return:
"""
data = urlencode(data)
req = urllib2.Request(self.url, data)
return urllib2.urlopen(req).read() | {
"content_hash": "4da9ed26b72f04b553e37e37fe5159e6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 28.688524590163933,
"alnum_prop": 0.52,
"repo_name": "mmetince/captchasec",
"id": "67ccb8c6ec0763a8bda26b56947bbd1d24292f5d",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "captchasec/decaptcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12635"
}
],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime, date, time
from decimal import Decimal
from uuid import UUID, uuid4
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.usertype import UserType
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class UserDefinedTypeTests(BaseCassEngTestCase):
def setUp(self):
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest("UDTs require native protocol 3+, currently using: {0}".format(PROTOCOL_VERSION))
def test_can_create_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
# Add a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
self.assertEqual("male", user.gender)
# Remove a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
with self.assertRaises(AttributeError):
user.gender
def test_can_insert_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
self.assertEqual(1, UserModel.objects.count())
john = UserModel.objects().first()
self.assertEqual(0, john.id)
self.assertTrue(type(john.info) is User)
self.assertEqual(42, john.info.age)
self.assertEqual("John", john.info.name)
def test_can_update_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
created_user = UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
created_user.info = User(age=22, name="Mary")
created_user.save()
mary_info = UserModel.objects().first().info
self.assertEqual(22, mary_info.age)
self.assertEqual("Mary", mary_info.name)
def test_can_create_same_udt_different_keyspaces(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
create_keyspace_simple("simplex", 1)
sync_type("simplex", User)
drop_keyspace("simplex")
def test_can_insert_partial_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
self.assertIsNone(john_info.gender)
user = User(age=42)
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertIsNone(john_info.name)
self.assertIsNone(john_info.gender)
def test_can_insert_nested_udts(self):
class Depth_0(UserType):
age = columns.Integer()
name = columns.Text()
class Depth_1(UserType):
value = columns.UserDefinedType(Depth_0)
class Depth_2(UserType):
value = columns.UserDefinedType(Depth_1)
class Depth_3(UserType):
value = columns.UserDefinedType(Depth_2)
class DepthModel(Model):
id = columns.Integer(primary_key=True)
v_0 = columns.UserDefinedType(Depth_0)
v_1 = columns.UserDefinedType(Depth_1)
v_2 = columns.UserDefinedType(Depth_2)
v_3 = columns.UserDefinedType(Depth_3)
sync_table(DepthModel)
udts = [Depth_0(age=42, name="John")]
udts.append(Depth_1(value=udts[0]))
udts.append(Depth_2(value=udts[1]))
udts.append(Depth_3(value=udts[2]))
DepthModel.create(id=0, v_0=udts[0], v_1=udts[1], v_2=udts[2], v_3=udts[3])
output = DepthModel.objects().first()
self.assertEqual(udts[0], output.v_0)
self.assertEqual(udts[1], output.v_1)
self.assertEqual(udts[2], output.v_2)
self.assertEqual(udts[3], output.v_3)
def test_can_insert_udts_with_nones(self):
"""
Test for inserting all column types as empty into a UserType as None's
test_can_insert_udts_with_nones tests that each cqlengine column type can be inserted into a UserType as None's.
It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates
a UserType instance where all the fields are None's and inserts the UserType as an instance of the Model. Finally,
it verifies that each column read from the UserType from Cassandra is None.
@since 2.5.0
@jira_ticket PYTHON-251
@expected_result The UserType is inserted with each column type, and the resulting read yields None's for each column.
@test_category data_types:udt
"""
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float(double_precision=False)
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None,
l=None, m=None, n=None)
AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
self.assertEqual(input, output)
def test_can_insert_udts_with_all_datatypes(self):
"""
Test for inserting all column types into a UserType
test_can_insert_udts_with_all_datatypes tests that each cqlengine column type can be inserted into a UserType.
It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates
a UserType instance where all the fields have corresponding data, and inserts the UserType as an instance of the Model.
Finally, it verifies that each column read from the UserType from Cassandra is the same as the input parameters.
@since 2.5.0
@jira_ticket PYTHON-251
@expected_result The UserType is inserted with each column type, and the resulting read yields proper data for each column.
@test_category data_types:udt
"""
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float(double_precision=False)
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True,
e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39,
h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text',
l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'))
AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
for i in range(ord('a'), ord('a') + 14):
self.assertEqual(input[chr(i)], output[chr(i)])
def test_can_insert_udts_protocol_v4_datatypes(self):
"""
Test for inserting all protocol v4 column types into a UserType
test_can_insert_udts_protocol_v4_datatypes tests that each protocol v4 cqlengine column type can be inserted
into a UserType. It first creates a UserType that has each protocol v4 cqlengine column type, and a corresponding
table/Model. It then creates a UserType instance where all the fields have corresponding data, and inserts the
UserType as an instance of the Model. Finally, it verifies that each column read from the UserType from Cassandra
is the same as the input parameters.
@since 2.6.0
@jira_ticket PYTHON-245
@expected_result The UserType is inserted with each protocol v4 column type, and the resulting read yields proper data for each column.
@test_category data_types:udt
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes in UDTs require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
class Allv4Datatypes(UserType):
a = columns.Date()
b = columns.SmallInt()
c = columns.Time()
d = columns.TinyInt()
class Allv4DatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(Allv4Datatypes)
sync_table(Allv4DatatypesModel)
input = Allv4Datatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123)
Allv4DatatypesModel.create(id=0, data=input)
self.assertEqual(1, Allv4DatatypesModel.objects.count())
output = Allv4DatatypesModel.objects().first().data
for i in range(ord('a'), ord('a') + 3):
self.assertEqual(input[chr(i)], output[chr(i)])
def test_nested_udts_inserts(self):
"""
Test for inserting collections of user types using cql engine.
test_nested_udts_inserts Constructs a model that contains a list of usertypes. It will then attempt to insert
them. The expectation is that no exception is thrown during insert. For sanity sake we also validate that our
input and output values match. This combination of model, and UT produces a syntax error in 2.5.1 due to
improper quoting around the names collection.
@since 2.6.0
@jira_ticket PYTHON-311
@expected_result No syntax exception thrown
@test_category data_types:udt
"""
class Name(UserType):
type_name__ = "header"
name = columns.Text()
value = columns.Text()
class Container(Model):
id = columns.UUID(primary_key=True, default=uuid4)
names = columns.List(columns.UserDefinedType(Name))
# Construct the objects and insert them
names = []
for i in range(0, 10):
names.append(Name(name="name{0}".format(i), value="value{0}".format(i)))
# Create table, insert data
sync_table(Container)
Container.create(id=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), names=names)
# Validate input and output matches
self.assertEqual(1, Container.objects.count())
names_output = Container.objects().first().names
self.assertEqual(names_output, names)
| {
"content_hash": "30c46fc381d13740ac6a2aeae178e931",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 143,
"avg_line_length": 37.14565826330532,
"alnum_prop": 0.6181283462785612,
"repo_name": "jregovic/python-driver",
"id": "bf2f37020c8a44cb6f6ddc0a2042aa7352b02030",
"size": "13835",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/integration/cqlengine/model/test_udts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28918"
},
{
"name": "Python",
"bytes": "1710751"
}
],
"symlink_target": ""
} |
import logging
from collections import OrderedDict
from time import sleep
from decimal import Decimal
from random import random
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from localflavor.se.forms import SEPersonalIdentityNumberField
import stripe
from rest_framework import serializers
from rest_framework.reverse import reverse
from rest_framework_expandable import ExpandableSerializerMixin
from rest_framework_jwt.utils import jwt_payload_handler as original_jwt_payload_handler
from . import exceptions, models
logger = logging.getLogger(__name__)
def jwt_payload_handler(user):
payload = original_jwt_payload_handler(user)
payload['name'] = user.get_full_name()
payload['nin'] = user.nin
return payload
class EventSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Event
fields = [
'url',
'id',
'slug',
'name',
'description',
'organization'
]
class OrganizationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Organization
fields = [
'url',
'id',
'name',
'organization_number',
'address',
'email'
]
class PurchasePaymentSerializer(serializers.Serializer):
# Hard coded to Stripe for the time being.
type = serializers.ChoiceField(choices=(('stripe', _('Stripe')),))
payload = serializers.CharField()
amount = serializers.DecimalField(max_digits=9, decimal_places=2)
class PurchaseTicketSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Ticket
fields = (
'url',
'id',
'ticket_type',
'variation_choices'
)
extra_kwargs = {
'ticket_type': {
# Don't filter on general availability! We might have access
# codes.
'queryset': models.TicketType.objects.published()
}
}
def validate(self, attrs):
if (list(models.Variation.objects.filter(choices__in=attrs['variation_choices']).distinct()) != list(attrs['ticket_type'].variations.all()) or len(attrs['variation_choices']) != attrs['ticket_type'].variations.count()):
raise exceptions.InvalidVariationChoices(_('Invalid variation choices.'))
return attrs
class PurchaseTransactionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Transaction
fields = (
'amount',
)
class PurchaseMessageSerializer(serializers.Serializer):
code = serializers.CharField()
text = serializers.CharField()
entity = serializers.CharField()
class PurchaseUserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.User
fields = (
'nin',
)
extra_kwargs = {
'nin': {'max_length': 13} # Allow YYYYMMDD-XXXX
}
def validate_nin(self, value):
return SEPersonalIdentityNumberField(required=False, coordination_number=False).clean(value=value)
class PurchaseSerializer(serializers.Serializer):
tickets = PurchaseTicketSerializer(
many=True,
allow_empty=False)
# Request only
access_codes = serializers.SlugRelatedField(
write_only=True,
many=True,
slug_field='token',
queryset=models.AccessCode.objects.utilizable(),
allow_empty=True)
payment = PurchasePaymentSerializer(
write_only=True)
user = PurchaseUserSerializer(
write_only=True)
# Response only
messages = PurchaseMessageSerializer(
read_only=True,
many=True,
allow_empty=True)
transactions = PurchaseTransactionSerializer(
read_only=True,
many=True,
allow_null=True)
def validate_tickets(self, value):
used_ticket_type_ids = [t.get('ticket_type').id for t in value]
# We only want exactly one organization. Many organizations *can* be
# handled but implicates troubles if payment to one organization
# succeeds but not to another.
if models.Organization.objects.filter(
events__ticket_types__id__in=used_ticket_type_ids
).distinct().count() != 1:
raise exceptions.MultipleOrganizations()
if models.TicketType.objects.filter(
id__in=used_ticket_type_ids,
conflicts_with__id__in=used_ticket_type_ids
).exists():
raise exceptions.ConflictingTicketTypes()
return value
def create(self, validated_data):
# * Validate ticket types according to general avail. and access code
# * Create ticket objects
# * Charge with Stripe
# * Rollback transaction if Stripe fails
response_data = OrderedDict((
('tickets', []),
('transactions', []),
('messages', [])
))
request = self.context['request']
user = request.user
tickets_data = validated_data.get('tickets')
# We've already validated there is only one organization in play here.
organization = tickets_data[0]['ticket_type'].event.organization
unclear_tickets = set()
safe_tickets = set()
lost_tickets = set()
with transaction.atomic():
for ticket_data in tickets_data:
for access_code in validated_data.get('access_codes'):
if access_code and access_code.ticket_type == ticket_data['ticket_type']:
ticket_data['access_code'] = access_code
# Keeping M2M until we have saved
variation_choices = ticket_data.pop('variation_choices')
ticket = models.Ticket.objects.create(pending=True, **ticket_data)
ticket.variation_choices.set(variation_choices)
models.TicketOwnership.objects.create(ticket=ticket, user=user)
unclear_tickets.add(ticket)
try:
while True:
for ticket in unclear_tickets:
if not ticket.ticket_type.is_generally_available:
# Ticket type not available for purchase
if not ticket.access_code:
lost_tickets.add(ticket)
response_data['messages'].append(dict(
code='ticket_type_unavailable',
text=_("The ticket type '{}' is not available for purchase.".format(
ticket.ticket_type.name)),
entity=reverse('tickettype-detail', kwargs={
'pk': ticket.ticket_type_id}, request=request)))
continue
# Access code is supplied, has an impact on availability
# and is utilized by a ticket before this one.
if ticket.access_code and ticket.access_code.tickets.before_in_queue(ticket=ticket).exists():
lost_tickets.add(ticket)
response_data['messages'].append(OrderedDict((
('code', 'access_code_utilized'),
('text', _("The access code is already utilized."))
)))
continue
if models.Ticket.objects.filter(ticket_type__conflicts_with=ticket.ticket_type).before_in_queue(ticket=ticket).owned_by(user).exists():
lost_tickets.add(ticket)
response_data['messages'].append(dict(
code='ticket_type_conflicting',
text=_('The ticket type conflicts with another ticket of yours.'),
entity=reverse('tickettype-detail', kwargs={'pk': ticket.ticket_type_id}, request=request)
))
continue
# No tickets left
if not ticket.ticket_type.within_max_total_quantity:
lost_tickets.add(ticket)
response_data['messages'].append(OrderedDict((
('code', 'ticket_type_max_total_quantity_exceeded'),
('text', _("There are no tickets of the type '{}' left.".format(ticket.ticket_type.name))),
('entity', reverse('tickettype-detail', kwargs={'pk': ticket.ticket_type_id}, request=request))
)))
continue
if ticket.ticket_type.max_personal_quantity and ticket.ticket_type.tickets.before_in_queue(ticket=ticket).owned_by(user).count() >= ticket.ticket_type.max_personal_quantity:
lost_tickets.add(ticket)
response_data['messages'].append(OrderedDict((
('code', 'ticket_type_max_personal_quantity_exceeded'),
('text', _(
"You have reached your personal limit for the ticket type '{}'.".format(
ticket.ticket_type.name))),
('entity', reverse('tickettype-detail', kwargs={
'pk': ticket.ticket_type_id}, request=request))
)))
continue
max_total_quantity = ticket.ticket_type.max_total_quantity
position = ticket.ticket_type.tickets.before_in_queue(ticket=ticket).count()
# position is zero-indexed
if not max_total_quantity or position < max_total_quantity:
# Since the position never gets higher, we now know that
# this ticket is safe. Great!
safe_tickets.add(ticket)
continue
# Everything above this position will be discarded.
cutoff_position = 2 * max_total_quantity - ticket.ticket_type.tickets.unpending().count()
# Cutting off randomly (although weighted) up to the point
# where there are as many pending over the maximum quantity
# as under it. (Beneath this point all are discarded)
if random() <= (
(position - max_total_quantity) /
(cutoff_position - max_total_quantity or 0.001) # Save us from ZeroDivisionError
) ** 3: # Make it much harder near the cutoff
lost_tickets.add(ticket)
response_data['messages'].append(OrderedDict((
('code', 'bad_queue_position'),
('text', _(
"You didn't get a good enough queue position to get the ticket '{}'.".format(
ticket.ticket_type.name))),
('entity', reverse('tickettype-detail', kwargs={
'pk': ticket.ticket_type_id}, request=request))
)))
continue
unclear_tickets = unclear_tickets.difference(safe_tickets,
lost_tickets)
# Delete all lost tickets so we release the positions to
# concurrent buyers
for ticket in lost_tickets:
ticket.delete()
lost_tickets.clear()
if unclear_tickets:
# Wait for a while and retry.
sleep(0.5)
continue
else:
# We know every ticket's destiny.
break
except Exception as exc:
# Something went really wrong and we must clean up the mess before
# raising the exception.
for ticket in unclear_tickets | safe_tickets | lost_tickets:
ticket.delete()
raise exc
if safe_tickets:
charge_amount = Decimal(0)
for ticket in safe_tickets:
# There should be one ownership per ticket, hence get()
charge_amount += ticket.ownerships.get().price
try:
assert charge_amount <= validated_data['payment']['amount']
charge = stripe.Charge.create(
stripe_account=organization.stripe_account_id,
source=validated_data['payment']['payload'],
amount=int(charge_amount * 100),
currency=settings.CURRENCY,
receipt_email=user.email,
metadata=dict(
bitket_ticket_ownerships=','.join(map(lambda x: str(x.ownerships.get().pk), safe_tickets))
)
)
except Exception as exc:
# Something went wrong and we must clean up the mess.
for ticket in safe_tickets:
ticket.delete()
if isinstance(exc, stripe.error.CardError):
# See https://stripe.com/docs/api#errors
error = exc.json_body['error']
raise exceptions.PaymentFailed(error['message'])
raise exc
purchase_transaction = models.Transaction.objects.create(
amount=charge_amount,
stripe_charge=charge.id
)
response_data['transactions'].append(purchase_transaction)
# Must be done before sending the confirmation emails
user.nin = validated_data['user']['nin']
user.save()
for ticket in safe_tickets:
ticket.pending = False
ticket_ownership = ticket.ownerships.latest()
purchase_transaction.ticket_ownerships.add(ticket_ownership)
ticket.save()
response_data['tickets'].append(ticket)
try:
ticket_ownership.email_confirmation()
except Exception as exc:
# This should not stop us.
logger.exception('Exception during mail send')
return response_data
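    # Illustrative request payload sketch (added comment, not part of the
    # original source); field names follow the serializer fields above, all
    # concrete values are made up:
    #   {
    #     "tickets": [{"ticket_type": "<ticket-type-url>", "variation_choices": []}],
    #     "access_codes": [],
    #     "payment": {"type": "stripe", "payload": "<stripe-source-token>", "amount": "150.00"},
    #     "user": {"nin": "19900101-1234"}
    #   }
    # The response echoes "tickets" and adds the read-only "transactions" and
    # "messages" lists produced by create() above.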
class TicketSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Ticket
fields = [
'url',
'id',
'ticket_type',
'variation_choices',
'utilized'
]
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.User
fields = [
'url',
'id',
'name',
'email'
]
class PrivilegedUserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.User
fields = [
'url',
'id',
'name',
'nin',
'email'
]
class TicketOwnershipSerializer(ExpandableSerializerMixin,
serializers.HyperlinkedModelSerializer):
resell_token = serializers.SerializerMethodField()
qr = serializers.SerializerMethodField()
price = serializers.DecimalField(max_digits=9, decimal_places=2)
utilized = serializers.SerializerMethodField()
class Meta:
model = models.TicketOwnership
fields = [
'url',
'id',
'ticket',
'user',
'code',
'qr',
'price',
'resell_token',
'is_current',
'utilized'
]
expandable_fields = {
'ticket': (TicketSerializer, list(), dict()),
'user': (PrivilegedUserSerializer, list(), dict())
}
def get_qr(self, obj):
if obj.is_current:
return b'data:image/png;base64,' + obj.get_qr()
return None
def get_resell_token(self, obj):
if self.context['request'].user != obj.user:
return None
return obj.resell_token
def get_utilized(self, obj):
return obj.ticket.utilized
class VariationChoiceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.VariationChoice
fields = [
'url',
'id',
'variation',
'name',
'delta',
'index'
]
class VariationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Variation
fields = [
'url',
'id',
'ticket_type',
'name',
'index'
]
class _VariationSerializer(VariationSerializer):
choices = VariationChoiceSerializer(many=True)
class Meta:
model = models.Variation
fields = [
'url',
'id',
'name',
'choices'
]
class AccessCodeSerializer(serializers.HyperlinkedModelSerializer):
token = serializers.SerializerMethodField()
class Meta:
model = models.AccessCode
fields = [
'token',
'ticket_type',
]
def get_token(self, obj):
return self.context['view'].kwargs.get('token', obj.token)
class TicketTypeSerializer(serializers.HyperlinkedModelSerializer):
modifiers = serializers.SerializerMethodField()
availability = serializers.SerializerMethodField()
class Meta:
model = models.TicketType
fields = [
'url',
'id',
'event',
'name',
'description',
'price',
'modifiers',
'availability',
'conflicts_with',
'index'
]
def get_availability(self, obj):
return OrderedDict((
('general', obj.is_generally_available),
('total_quantity', obj.within_max_total_quantity)
))
def get_modifiers(self, obj):
class _ModifierSerializer(serializers.ModelSerializer):
condition = serializers.SerializerMethodField()
class Meta:
model = models.Modifier
fields = [
'condition',
'delta'
]
def get_condition(self, obj):
return '{}'.format(obj.condition_subclass)
user = self.context['request'].user
modifiers = obj.modifiers.eligible(user).select_related('condition')
serializer = _ModifierSerializer(modifiers, read_only=True, many=True)
return serializer.data
| {
"content_hash": "e64f79c0532e33e8cc7bdec7d096d7cf",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 227,
"avg_line_length": 35.83427495291902,
"alnum_prop": 0.5399936935043095,
"repo_name": "ovidner/bitket",
"id": "5edd39df47aa10f557ca14ea347dce62edc55b86",
"size": "19028",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bitket/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2021"
},
{
"name": "HTML",
"bytes": "4404"
},
{
"name": "JavaScript",
"bytes": "99011"
},
{
"name": "Python",
"bytes": "138914"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
} |
def is_palindrome(string):
""" (str) -> bool
Return True if and only if string is a palindrome.
Precondition: string is all in lowercase.
    >>> is_palindrome('ABCDEFG')
    ''
    >>> is_palindrome('madamimadam')
    True
    >>> is_palindrome('Racecar')
    ''
    >>> is_palindrome('racecars')
    False
"""
if string.islower():
return string == string[::-1]
else:
return ''
def is_palindromic_phrase(phrase):
""" (str) -> bool
Return True if and only if the string phrase is a palindrome,
ignoring both case (considering uppercase equivalent to their lowercase
form) and non-alphabetic characters (ignoring these completely).
    >>> is_palindromic_phrase("Madam, I'm Adam")
    True
    >>> is_palindromic_phrase('A man, a plan, a canal, Panama.')
    True
    >>> is_palindromic_phrase('The rain in spain falls mainly on the plane')
    False
"""
phrase = phrase.lower()
alphabet = 'abcdefghijklmnopqrstuvwxyz'
new_phrase = ''
for char in phrase:
if char in alphabet:
new_phrase += char
return is_palindrome(new_phrase)
def get_odd_palindrome_at(string, index):
""" (str, int) -> str
Returns the longest odd-length palindrome in the string which is centred
at the specified index.
Preconditions: 0 <= index <= len(string), string is all in lowercase.
    >>> get_odd_palindrome_at('abcdefedcba', 5)
    'abcdefedcba'
    >>> get_odd_palindrome_at('abcdefedcbafgsfds', 5)
    'abcdefedcba'
    >>> get_odd_palindrome_at('fgsfdsabcdefedcba', 11)
    'abcdefedcba'
    >>> get_odd_palindrome_at('ABCDEFEDCBA', 5)
    ''
"""
if string.islower():
palindrome = ''
distance_from_end = len(string) - index
if distance_from_end < index:
count = distance_from_end
else:
count = index
for spot in range(1, count + 1):
candidate = (string[index - spot: index + spot + 1])
if is_palindrome(candidate) and len(candidate) > len(palindrome):
palindrome = candidate
return palindrome
else:
return '' | {
"content_hash": "44b34b58d8509cf30bc4fe1e4a19af1a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 29.263157894736842,
"alnum_prop": 0.5849820143884892,
"repo_name": "mdnu/snake",
"id": "887341fab9e64471099f6dac18cf3f9521f9bffc",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csc work/A2/palindromes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177549"
}
],
"symlink_target": ""
} |
"""Geometric transforms (e.g. rigid transformation)."""
import dataclasses
from typing import Union
import tensorflow as tf
TensorLike = tf.types.experimental.TensorLike
@dataclasses.dataclass
class Isometry:
"""3D transform object used to represent an SE(3) (isometric) transform.
Underneath this class stores the transform [R|t] composed of rotation (R) and
translation (t).
Usage example:
frameB_from_frameA = Isometry(R=tf.eye(3), t=tf.ones(3))
pointA = tf.random.uniform(shape=(3,))
pointB = frameB_from_frameA * pointA
pointA = frameB_from_frameA.inverse() * pointB
Compose multiple transforms:
frameA_to_frameB = Isometry(...)
frameB_to_frameC = Isometry(...)
frameA_to_frameC = frameB_to_frameC * frameA_to_frameB
Apply transform on single point:
pointB = frameA_to_frameB * tf.constant([4.0, 2.0, 1.0])
Apply transform on a pointcloud (Nx3):
pointcloudC = frameA_to_frameC * tf.random.uniform(shape=(1000, 3))
Apply transform on multiple batches of pointcloud (MxNx3):
pointcloudC = frameA_to_frameC * tf.random.uniform(shape=(5, 1000, 3))
"""
# Rotation component with tensor shape (3, 3)
R: TensorLike # pylint: disable=invalid-name
# Translation component with tensor shape (3,)
t: TensorLike
def __post_init__(self):
self.R = tf.ensure_shape(tf.convert_to_tensor(self.R), (3, 3))
self.t = tf.ensure_shape(tf.convert_to_tensor(self.t), (3,))
@classmethod
def from_matrix(cls, matrix: TensorLike) -> 'Isometry':
"""Constructs from a 3x4 or 4x4 transform matrix."""
return cls(R=matrix[:3, :3], t=matrix[:3, 3])
def matrix3x4(self) -> tf.Tensor:
"""Returns as 3x4 matrix.
Returns a matrix [R|t] of shape (3, 4)
"""
return tf.concat((self.R, tf.reshape(self.t, (3, 1))), axis=1)
def matrix4x4(self) -> tf.Tensor:
"""Returns as 4x4 matrix.
Returns a matrix [R|t] of shape (4, 4)
[0|1]
"""
return tf.concat((self.matrix3x4(), [[0, 0, 0, 1]]), axis=0)
def inverse(self) -> 'Isometry':
"""Returns the inverse of self.
Usage example:
frameB_from_frameA = Isometry(R=tf.eye(3), t=tf.ones(3))
frameA_from_frameB = frameB_from_frameA.inverse()
Returns:
Inverse transform of self.
"""
return Isometry(
R=tf.transpose(self.R),
t=-tf.linalg.matvec(tf.transpose(self.R), self.t),
)
def compose(self, other: 'Isometry') -> 'Isometry':
"""Returns the composite transform equal to self * other.
This function is used to compose multiple transforms together. This can
alternatively be achieved via `*` operator.
Usage example:
frameB_from_frameA = Isometry(R=..., t=...)
frameC_from_frameB = Isometry(R=..., t=...)
frameC_from_frameA = frameC_from_frameB.compose(frameB_from_frameA)
Args:
other: Another transform to compose with.
Returns:
Composite transform equal to self * other.
"""
return Isometry(
R=self.R @ other.R, t=tf.linalg.matvec(self.R, other.t) + self.t
)
def transform_points(self, points: TensorLike) -> tf.Tensor:
"""Computes the transformation of a set of points.
frameA_to_frameB = Isometry()
pointsA = tf.random.uniform(shape=(1000, 3))
pointsB = frameA_to_frameB.transform_points(pointsA)
Args:
points: Tensor containing point positions of shape (..., 3).
Returns:
Transformed points.
"""
return tf.einsum('ij,...j->...i', self.R, points) + self.t
def __mul__(
self, other: Union['Isometry', TensorLike]
) -> Union['Isometry', tf.Tensor]:
"""Returns the product of self with other i.e.
out = self * other.
This function can be used to transform point(s) or compose multiple
transforms together.
Compose multiple transforms:
frameA_to_frameB = Isometry(...)
frameB_to_frameC = Isometry(...)
frameA_to_frameC = frameB_to_frameC * frameA_to_frameB
Apply transform on single point:
pointB = frameA_to_frameB * tf.constant([4.0, 2.0, 1.0])
Apply transform on a pointcloud (Nx3):
pointcloudC = frameA_to_frameC * tf.random.uniform(shape=(1000, 3))
Apply transform on multiple batches of pointcloud (MxNx3):
pointcloudC = frameA_to_frameC * tf.random.uniform(shape=(5, 1000, 3))
Args:
other: Either 3D point(s) to transform or other transform to compose with.
Returns:
When multiplying with another Isometry object `other`, the composite
transform equal to `(this * other)` is returned. When `other` is a point
cloud tensor with shape (..., 3), the output is the transformed point
cloud (with same shape).
"""
if isinstance(other, Isometry):
return self.compose(other)
else:
return self.transform_points(other)
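# Illustrative usage sketch (added; not part of the original module). It only
# exercises the Isometry API defined above; the frame names are made up and the
# helper is never called by the library itself.
def _example_isometry_round_trip() -> tf.Tensor:
  """Transforms points into another frame and back using Isometry."""
  world_from_cam = Isometry(R=tf.eye(3), t=tf.constant([0.0, 0.0, 1.0]))
  cam_from_world = world_from_cam.inverse()
  points_cam = tf.random.uniform(shape=(100, 3))
  # `*` dispatches to transform_points() for tensors.
  points_world = world_from_cam * points_cam
  # Mapping back with the inverse recovers (approximately) the original points.
  return cam_from_world * points_world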
| {
"content_hash": "e61b579c01aa8d203575dcef103f8fb2",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 80,
"avg_line_length": 30.11320754716981,
"alnum_prop": 0.6560150375939849,
"repo_name": "google-research/sunds",
"id": "ddfb4885f96378886114cee4a762244470dd7274",
"size": "5371",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sunds/core/tf_geometry/isometry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "202324"
}
],
"symlink_target": ""
} |
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(util.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2, 4, 1):
# Starting in 2.4.1, the str type is not accepted anymore, therefore,
# we convert all str objects to Unicode
# As registering a adapter for a primitive type causes a small
# slow-down, this adapter is only registered for sqlite3 versions
# needing it (Python 2.6 and up).
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
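    # Illustrative example (added comment, not in the original source): with
    # three fields this returns 999 // 3 == 333 rows per batch, while a
    # single-field insert is capped at 500 rows per batch.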
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
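    # Illustrative example (added comment, not in the original source): for a
    # one-day timedelta with connector "+" this renders roughly
    #   django_format_dtdelta(<sql>, "+", "1", "0", "0")
    # which the _sqlite_format_dtdelta() function registered below evaluates by
    # applying the delta and returning the shifted timestamp as a string.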
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
# Register extract, date_trunc, and regexp functions.
conn.create_function("django_extract", 2, _sqlite_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
return conn
def init_connection_state(self):
pass
def _sqlite_create_connection(self):
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
def _cursor(self):
if self.connection is None:
self._sqlite_create_connection()
return self.connection.cursor(factory=SQLiteCursorWrapper)
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
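    # Illustrative usage sketch (added comment, not in the original source),
    # relying only on the methods named in the docstring above:
    #   connection.disable_constraint_checking()
    #   ... load rows whose foreign keys may not resolve yet ...
    #   connection.enable_constraint_checking()
    #   connection.check_constraints()  # raises IntegrityError on bad rows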
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
try:
return Database.Cursor.execute(self, query, params)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def executemany(self, query, param_list):
query = self.convert_query(query)
try:
return Database.Cursor.executemany(self, query, param_list)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%','%')
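    # Illustrative example (added comment, not in the original source):
    #   convert_query("SELECT x FROM t WHERE a = %s AND b LIKE '100%%'")
    # returns
    #   "SELECT x FROM t WHERE a = ? AND b LIKE '100%'"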
def _sqlite_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = util.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
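# Illustrative example (added for clarity; not part of the original backend):
#   _sqlite_format_dtdelta('2012-03-04 10:20:30', '+', 1, 0, 0)
#   # -> '2012-03-05 10:20:30' (one day added)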
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, re_string))
| {
"content_hash": "b30072a826aef41d89fcab07c42943b5",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 120,
"avg_line_length": 43.167053364269144,
"alnum_prop": 0.6437516796560064,
"repo_name": "blaze33/django",
"id": "1fcc222c80baa045106f62cfacf5b7c24d8c24d3",
"size": "18605",
"binary": false,
"copies": "3",
"ref": "refs/heads/ticket_19456",
"path": "django/db/backends/sqlite3/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8243959"
},
{
"name": "Shell",
"bytes": "6521"
}
],
"symlink_target": ""
} |
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"]
}
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
    comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
               "significantly. The converter cannot detect this reliably, so "
               "you need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
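# Illustrative check (added for clarity; not part of the original script):
# the spec is plain data, so mappings can be inspected directly, e.g.
#   spec = APIChangeSpec()
#   spec.function_renames["tf.mul"]             # -> "tf.multiply"
#   spec.function_keyword_renames["tf.argmax"]  # -> {"dimension": "axis"}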
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset within the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
    # Map from line number to the list of FileEditTuples recorded for it.
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
  def process(self, text):
    """Apply the recorded edits to a list of lines of text.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
      A tuple of the modified text, a textual change report, and a list of errors.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
    # Iterate over each line with recorded edits
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are processed in order, which makes the
      # indexing adjustments cumulative for changes that alter the string
      # length.
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
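# Illustrative usage (added for clarity; not part of the original script):
#   recorder = FileEditRecorder("example.py")
#   recorder.add("Renamed function 'tf.mul' to 'tf.multiply'",
#                line=1, start=0, old="tf.mul", new="tf.multiply")
#   new_text, report, errors = recorder.process(["tf.mul(a, b)\n"])
#   # new_text == "tf.multiply(a, b)\n" and errors == []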
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
    This is necessary mainly because ast.ListComp reports the location of the
    first token after the opening '[' of the list comprehension.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
        # Reverse the text before col so the regexes below can search backwards
        # for whitespace and the opening '['.
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
    # Most other nodes return proper locations (`with` notably does not), but
    # a `with` statement cannot appear as an argument, so this is fine.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
              error="Failed to find keyword lexicographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
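# Illustrative usage (added for clarity; not part of the original script):
#   lines = ["a = tf.mul(x, y)\n"]
#   visitor = TensorFlowCallVisitor("example.py", lines)
#   visitor.visit(ast.parse("".join(lines)))
#   new_text, report, errors = visitor.process(lines)
#   # new_text == "a = tf.multiply(x, y)\n"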
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory, copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in other
    languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
      output_root_directory: Directory to use as the base for the output tree.
      copy_other_files: Whether to copy non-Python files into the output tree.
    Returns:
      A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
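# Illustrative usage (added for clarity; not part of the original script):
#   import io
#   upgrader = TensorFlowCodeUpgrader()
#   in_file, out_file = io.StringIO(u"tf.mul(a, b)\n"), io.StringIO()
#   count, report, errors = upgrader.process_opened_file(
#       "old.py", in_file, "new.py", out_file)
#   # out_file.getvalue() == u"tf.multiply(a, b)\n"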
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
| {
"content_hash": "70054b8f67f0a1a80eb8f8227ceb00b1",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 82,
"avg_line_length": 37.21656976744186,
"alnum_prop": 0.6000781097441906,
"repo_name": "sugartom/tensorflow-alien",
"id": "43bee46f942e3f5e4e20375a361226cdf4bdd499",
"size": "26294",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/tools/compatibility/tf_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175397"
},
{
"name": "C++",
"bytes": "21819497"
},
{
"name": "CMake",
"bytes": "131374"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "781191"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279506"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833831"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "199996"
},
{
"name": "Python",
"bytes": "18062852"
},
{
"name": "Shell",
"bytes": "325198"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
} |
__author__ = 'RemiZOffAlex'
__copyright__ = '(c) RemiZOffAlex'
__license__ = 'MIT'
__email__ = '[email protected]'
__url__ = 'http://remizoffalex.ru'
from functools import wraps
from flask import (
Flask,
Markup,
session,
request,
g,
url_for,
escape,
redirect,
render_template,
Response,
jsonify,
abort,
send_file
)
import io
import os
import json
import email
import poplib
# Crypt and hash
import uuid
import hashlib
import datetime
from sqlalchemy import and_, or_
# RemiZOffAlex
from . import app, lib, models, forms
@app.route('/')
def index():
    """Home page"""
    pagedata = {'title': app.config['TITLE']}
body = render_template('index.html', pagedata=pagedata)
return body
@app.route('/certificates', defaults={'page': 1})
@app.route('/certificates/<int:page>')
def certificates(page):
    """List of certificates"""
    pagedata = {'title': 'Certificate list - {}'.format(app.config['TITLE'])}
pagedata['certificates'] = models.db_session.query(
models.Certificate
)
pagedata['pagination'] = lib.Pagination(
page,
10,
pagedata['certificates'].count()
)
pagedata['pagination'].url = '/certificates'
pagedata['certificates'] = lib.getpage(pagedata['certificates'], page)
pagedata['certificates'] = pagedata['certificates'].all()
body = render_template('certificates.html', pagedata=pagedata)
return body
@app.route('/generateca', methods=['GET', 'POST'])
def generateca():
pagedata = {'title': app.config['TITLE']}
pagedata['form'] = forms.NewCertificateForm(request.form)
if request.method == 'POST':
from OpenSSL import crypto, SSL
        # Create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 2048)
        # Create a self-signed certificate
cert = crypto.X509()
if pagedata['form'].C.data:
cert.get_subject().C = pagedata['form'].C.data
if pagedata['form'].ST.data:
cert.get_subject().ST = pagedata['form'].ST.data
if pagedata['form'].L.data:
cert.get_subject().L = pagedata['form'].L.data
if pagedata['form'].O.data:
cert.get_subject().O = pagedata['form'].O.data
if pagedata['form'].OU.data:
cert.get_subject().OU = pagedata['form'].OU.data
if pagedata['form'].CN.data:
cert.get_subject().CN = pagedata['form'].CN.data
cert.set_serial_number(1)
cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(10*365*24*60*60)  # 10 years
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
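        # Note (added for clarity): each sign() call below replaces the
        # previous signature, so only the final sha512 signature is kept.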
cert.sign(k, 'sha1')
cert.sign(k, 'sha256')
cert.sign(k, 'sha512')
pagedata['cert'] = crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode('utf-8')
pagedata['key'] = crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode('utf-8')
body = render_template('certificate_view.html', pagedata=pagedata)
return body
body = render_template('generateca.html', pagedata=pagedata)
return body
@app.route('/menu')
def menu():
    """Menu"""
pagedata = {'title': app.config['TITLE']}
body = render_template('menu.html', pagedata=pagedata)
return body
# noinspection PyUnusedLocal
@app.errorhandler(404)
def error_missing(exception):
pagedata = {}
pagedata['title'] = app.config['TITLE']
error_message = "Не судьба..."
return render_template("error.html", error_code=404, error_message=error_message, pagedata=pagedata), 404
# noinspection PyUnusedLocal
@app.errorhandler(403)
def error_unauthorized(exception):
pagedata = {}
pagedata['title'] = app.config['TITLE']
error_message = "У Вас нет достаточных прав для доступа к данному ресурсу"
return render_template("error.html", error_code=403, error_message=error_message, pagedata=pagedata), 403
# noinspection PyUnusedLocal
@app.errorhandler(500)
def error_crash(exception):
pagedata = {}
pagedata['title'] = app.config['TITLE']
error_message = "Вот незадача..."
return render_template("error.html", error_code=500, error_message=error_message, pagedata=pagedata), 500
| {
"content_hash": "e9e46aa2dbb6b6871fe0d38d1f058f60",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 109,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.6418087121212122,
"repo_name": "RemiZOffAlex/pycertauth",
"id": "ae837b13ebb4ddf4b63e5f6e2f16e04c554d7e1a",
"size": "4447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycertauth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89085"
},
{
"name": "HTML",
"bytes": "230861"
},
{
"name": "JavaScript",
"bytes": "12147"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "38335"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from keystoneclient import exceptions as keystone_exceptions
from mox import IsA
from horizon import api
from horizon import test
SYSPANEL_INDEX_URL = reverse('horizon:syspanel:overview:index')
DASH_INDEX_URL = reverse('horizon:nova:overview:index')
class AuthViewTests(test.BaseViewTests):
def setUp(self):
super(AuthViewTests, self).setUp()
self.setActiveUser()
self.PASSWORD = 'secret'
def test_login_index(self):
res = self.client.get(reverse('horizon:auth_login'))
self.assertTemplateUsed(res, 'splash.html')
def test_login_user_logged_in(self):
self.setActiveUser(self.TEST_TOKEN, self.TEST_USER, self.TEST_TENANT,
False, self.TEST_SERVICE_CATALOG)
res = self.client.get(reverse('horizon:auth_login'))
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
def test_login_no_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'region': 'http://localhost:5000/v2.0,local',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
id = TOKEN_ID,
user = {'roles': [{'name': 'fake'}]},
serviceCatalog = {}
aToken = api.Token(FakeToken())
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([])
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest),
IsA(unicode),
extra_tags=IsA(str))
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
def test_login(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'region': 'http://localhost:5000/v2.0,local',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
id = TOKEN_ID,
user = {"id": "1",
"roles": [{"id": "1", "name": "fake"}], "name": "user"}
serviceCatalog = {}
tenant = None
aToken = api.Token(FakeToken())
bToken = aToken
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
bToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([aTenant])
self.mox.StubOutWithMock(api, 'token_create_scoped')
api.token_create_scoped(IsA(http.HttpRequest), aTenant.id,
aToken.id).AndReturn(bToken)
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
def test_login_invalid_credentials(self):
self.mox.StubOutWithMock(api, 'token_create')
unauthorized = keystone_exceptions.Unauthorized("Invalid")
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndRaise(unauthorized)
self.mox.ReplayAll()
form_data = {'method': 'Login',
'region': 'http://localhost:5000/v2.0,local',
'password': self.PASSWORD,
'username': self.TEST_USER}
res = self.client.post(reverse('horizon:auth_login'),
form_data,
follow=True)
self.assertTemplateUsed(res, 'splash.html')
def test_login_exception(self):
self.mox.StubOutWithMock(api, 'token_create')
ex = keystone_exceptions.BadRequest('Cannot talk to keystone')
api.token_create(IsA(http.HttpRequest),
"",
self.TEST_USER,
self.PASSWORD).AndRaise(ex)
self.mox.ReplayAll()
form_data = {'method': 'Login',
'region': 'http://localhost:5000/v2.0,local',
'password': self.PASSWORD,
'username': self.TEST_USER}
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
def test_switch_tenants_index(self):
res = self.client.get(reverse('horizon:auth_switch',
args=[self.TEST_TENANT]))
self.assertRedirects(res, reverse("horizon:auth_login"))
def test_switch_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
tenants = self.TEST_CONTEXT['authorized_tenants']
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
aToken = self.mox.CreateMock(api.Token)
aToken.id = TOKEN_ID
aToken.user = {'id': self.TEST_USER_ID,
'name': self.TEST_USER, 'roles': [{'name': 'fake'}]}
aToken.serviceCatalog = {}
aToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
self.setActiveUser(id=self.TEST_USER_ID,
token=self.TEST_TOKEN,
username=self.TEST_USER,
tenant_id=self.TEST_TENANT,
service_catalog=self.TEST_SERVICE_CATALOG,
authorized_tenants=tenants)
self.mox.StubOutWithMock(api, 'token_create')
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.token_create(IsA(http.HttpRequest), NEW_TENANT_ID, self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id) \
.AndReturn([aTenant])
self.mox.ReplayAll()
form_data = {'method': 'LoginWithTenant',
'region': 'http://localhost:5000/v2.0,local',
'password': self.PASSWORD,
'tenant': NEW_TENANT_ID,
'username': self.TEST_USER}
res = self.client.post(reverse('horizon:auth_switch',
args=[NEW_TENANT_ID]), form_data)
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
self.assertEqual(self.client.session['tenant'], NEW_TENANT_NAME)
def test_logout(self):
KEY = 'arbitraryKeyString'
VALUE = 'arbitraryKeyValue'
self.assertNotIn(KEY, self.client.session)
self.client.session[KEY] = VALUE
res = self.client.get(reverse('horizon:auth_logout'))
self.assertRedirectsNoFollow(res, reverse('splash'))
self.assertNotIn(KEY, self.client.session)
| {
"content_hash": "809b5130f4698e24e42be3806c9d984b",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 78,
"avg_line_length": 36.995215311004785,
"alnum_prop": 0.5624676668391102,
"repo_name": "andrewsmedina/horizon",
"id": "cbe4dab65ea6df8e6f18efc9fba6b5869d2a61d0",
"size": "8541",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "horizon/horizon/tests/auth_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from random import shuffle
def bogosort(seq):
    while not all(seq[i] <= seq[i + 1] for i in range(len(seq) - 1)):
shuffle(seq)
return seq | {
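# Illustrative usage (added for clarity; not part of the original file):
#   bogosort([3, 1, 2])  # -> [1, 2, 3]
# Expected running time is O(n * n!) for distinct elements, since each of the
# n! permutations is equally likely after a shuffle.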
"content_hash": "37f5a833300e9b671347416c8de435e0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 22,
"alnum_prop": 0.6103896103896104,
"repo_name": "wizh/algorithms",
"id": "6b25ddd62f14af20e8240985d2c0eb58b4a70cc1",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sorting/Bogosort/bogosort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2237"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
from django.core.management import call_command
from trusts import ENTITY_MODEL_NAME, GROUP_MODEL_NAME, PERMISSION_MODEL_NAME, DEFAULT_SETTLOR, ALLOW_NULL_SETTLOR, ROOT_PK
import trusts.models
def forward(apps, schema_editor):
if getattr(settings, 'TRUSTS_CREATE_ROOT', True):
call_command('create_trust_root', apps=apps)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='Trust',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('title', models.CharField(verbose_name='title', max_length=40)),
('settlor', models.ForeignKey(to=ENTITY_MODEL_NAME, default=DEFAULT_SETTLOR, null=ALLOW_NULL_SETTLOR)),
('trust', models.ForeignKey(to='trusts.Trust', related_name='trusts_trust_content', default=ROOT_PK)),
                ('groups', models.ManyToManyField(to=GROUP_MODEL_NAME, related_name='trusts', verbose_name='groups', help_text='The groups this trust grants permissions to. A user will get all permissions granted to each of his/her group.')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'read'),
},
bases=(trusts.models.ReadonlyFieldsMixin, models.Model),
),
migrations.CreateModel(
name='TrustUserPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('entity', models.ForeignKey(to=ENTITY_MODEL_NAME, related_name='trustpermissions')),
('permission', models.ForeignKey(to=PERMISSION_MODEL_NAME, related_name='trustentities')),
('trust', models.ForeignKey(to='trusts.Trust', related_name='trustees')),
],
),
migrations.CreateModel(
name='RolePermission',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('managed', models.BooleanField(default=False)),
('permission', models.ForeignKey(to='auth.Permission', related_name='rolepermissions')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True, help_text="The name of the role. Corresponds to the key of model's trusts option.")),
('groups', models.ManyToManyField(related_name='roles', verbose_name='groups', to='auth.Group')),
('permissions', models.ManyToManyField(to='auth.Permission', related_name='roles', through='trusts.RolePermission', verbose_name='permissions')),
],
),
migrations.AddField(
model_name='rolepermission',
name='role',
field=models.ForeignKey(to='trusts.Role', related_name='rolepermissions'),
),
migrations.AlterUniqueTogether(
name='trust',
unique_together=set([('settlor', 'title')]),
),
migrations.AlterUniqueTogether(
name='rolepermission',
unique_together=set([('role', 'permission')]),
),
migrations.AlterUniqueTogether(
name='trustuserpermission',
unique_together=set([('trust', 'entity', 'permission')]),
),
migrations.RunPython(forward, backward)
]
| {
"content_hash": "662fa30acfc91f8689df5ab9de53aec8",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 241,
"avg_line_length": 46.892857142857146,
"alnum_prop": 0.6097994414826098,
"repo_name": "beedesk/django-trusts",
"id": "6d20fd8ab7c0de57d9599f3fdbedc81b6a72898a",
"size": "3963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trusts/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "77791"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0060_false_p_dedupe_indices'),
]
def disable_webhook_secret_for_existing_installs(apps, schema_editor):
system_settings = apps.get_model('dojo', 'system_settings')
try:
            # for existing installs we disable the webhook secret as we don't want to break those installs
ss = system_settings.objects.all().first()
if ss.enable_jira:
                # new installs do not have jira enabled, so we can assume this is an existing install
ss.disable_jira_webhook_secret = True
ss.save()
except Exception as e:
# probably a test run such as running unittest, no values in table
pass
operations = [
migrations.AddField(
model_name='system_settings',
name='disable_jira_webhook_secret',
field=models.BooleanField(default=False, help_text='Allows incoming requests without a secret (discouraged legacy behaviour)', verbose_name='Disable web hook secret'),
),
migrations.AddField(
model_name='system_settings',
name='jira_webhook_secret',
field=models.CharField(help_text='Secret needed in URL for incoming JIRA Webhook', max_length=64, null=True, verbose_name='JIRA Webhook URL'),
),
migrations.AlterField(
model_name='system_settings',
name='enable_jira_web_hook',
field=models.BooleanField(default=False, help_text='Please note: It is strongly recommended to use a secret below and / or IP whitelist the JIRA server using a proxy such as Nginx.', verbose_name='Enable JIRA web hook'),
),
migrations.RunPython(disable_webhook_secret_for_existing_installs)
]
| {
"content_hash": "bae182e41e72c9136df3640fa40d1287",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 232,
"avg_line_length": 45.48780487804878,
"alnum_prop": 0.6386058981233244,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "17568ddb148b418a681ee6c3f7c3e0670a9d6007",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/db_migrations/0061_jira_webhook_secret.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from django_jinja import library
from kitsune.karma.models import Title
@library.global_function
def karma_titles(user):
"""Return a list of titles for a given user."""
# Titles assigned to the user or groups
return Title.objects.filter(
Q(users=user) | Q(groups__in=user.groups.all())).distinct()
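# Illustrative template usage (added for clarity; not part of the original
# module), assuming a Jinja template with `user` in its context and a `name`
# field on Title:
#   {% for title in karma_titles(user) %}{{ title.name }}{% endfor %}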
| {
"content_hash": "91feca317befeb000e1b656e34087c2e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 27.153846153846153,
"alnum_prop": 0.7138810198300283,
"repo_name": "brittanystoroz/kitsune",
"id": "210f693be19fe357d64a920ff8c6bf1e24904f36",
"size": "353",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "kitsune/karma/templatetags/jinja_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2502"
},
{
"name": "CSS",
"bytes": "288314"
},
{
"name": "HTML",
"bytes": "619431"
},
{
"name": "JavaScript",
"bytes": "773968"
},
{
"name": "Python",
"bytes": "2824944"
},
{
"name": "Shell",
"bytes": "12697"
},
{
"name": "Smarty",
"bytes": "1957"
}
],
"symlink_target": ""
} |
import asyncio
import functools
import websockets
from websockets import handshake
from django.http import HttpResponse, HttpResponseServerError
def websocket(handler):
"""Decorator for WebSocket handlers."""
@functools.wraps(handler)
def wrapper(request, *args, **kwargs):
environ = request.META
try:
assert environ['wsgi.async']
reader = environ['async.reader']
writer = environ['async.writer']
# When the following assertions fail, insert an `import pdb;
# pdb.set_trace()` here and look for internal changes in aiohttp.
assert isinstance(reader, asyncio.streams.StreamReader)
assert isinstance(writer, asyncio.streams.StreamWriter)
# Extract the actual protocol and transport.
http_protocol = writer._protocol
transport = http_protocol.transport
assert http_protocol.reader is reader
assert http_protocol.writer is writer
assert reader._transport is transport
assert writer._transport is transport
assert transport._protocol is http_protocol
except (AssertionError, KeyError) as e: # pragma: no cover
# When the handshake fails (500), insert a `raise` here.
return HttpResponseServerError("Unsupported WSGI server: %s." % e)
@asyncio.coroutine
def run_ws_handler(ws):
yield from handler(ws, *args, **kwargs)
yield from ws.close()
def switch_protocols():
# Switch transport from http_protocol to ws_protocol (YOLO).
ws_protocol = websockets.WebSocketCommonProtocol()
transport._protocol = ws_protocol
ws_protocol.connection_made(transport)
# Ensure aiohttp doesn't interfere.
http_protocol.transport = None
# Fire'n'forget the WebSocket handler.
asyncio.async(run_ws_handler(ws_protocol))
return WebSocketResponse(environ, switch_protocols)
return wrapper
class WebSocketResponse(HttpResponse):
"""Upgrade from a WSGI connection with the WebSocket handshake."""
status_code = 101
def __init__(self, environ, switch_protocols):
super().__init__()
http_1_1 = environ['SERVER_PROTOCOL'] == 'HTTP/1.1'
get_header = lambda k: environ['HTTP_' + k.upper().replace('-', '_')]
key = handshake.check_request(get_header)
if not http_1_1 or key is None:
self.status_code = 400
self.content = "Invalid WebSocket handshake.\n"
else:
self._headers = {} # Reset headers (private API!)
set_header = self.__setitem__
handshake.build_response(set_header, key)
# Here be dragons.
self.close = switch_protocols
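# Illustrative usage sketch (added for clarity; not part of the original
# module). The echo behaviour and the recv()-returns-None-on-close semantics
# are assumptions about the websockets API of this era:
#   @websocket
#   @asyncio.coroutine
#   def echo(ws):
#       while True:
#           message = yield from ws.recv()
#           if message is None:
#               break
#           yield from ws.send(message)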
| {
"content_hash": "d0ad3888cf71808d052b4dec64062ef9",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 78,
"avg_line_length": 34.74698795180723,
"alnum_prop": 0.6137309292649098,
"repo_name": "aaugustin/django-c10k-demo",
"id": "3c2a9484b575631858d4a8a48b90bc839676a3ee",
"size": "2884",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "c10ktools/http/websockets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133"
},
{
"name": "HTML",
"bytes": "1735"
},
{
"name": "JavaScript",
"bytes": "1453"
},
{
"name": "Makefile",
"bytes": "281"
},
{
"name": "Python",
"bytes": "21867"
}
],
"symlink_target": ""
} |
import os
from datetime import datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from .models import File, Link
class FileTestCase(TestCase):
def setUp(self):
self.path = os.path.join(settings.MEDIA_ROOT,
datetime.now().strftime(File.UPLOAD_TO))
if not os.path.isdir(self.path):
os.makedirs(self.path)
self.total_files = len(os.listdir(self.path))
def tst_anonymous_user(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('files-list'))
self.assertEqual(response.status_code, 302)
def tst_authorization(self):
data = {'username': 'qqq', 'password': 'qqq'}
response = self.client.post(reverse('index'), data)
self.assertEqual(response.status_code, 404)
def tst_registration1(self):
data = {'username': 'qqq', 'password': 'qqq', 'is_register': 1}
response = self.client.post(reverse('index'), data)
self.assertEqual(response.status_code, 200)
self.assertTrue('sessionid' in response.cookies)
def tst_authorized_user(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('files-list'))
self.assertEqual(response.status_code, 200)
self.assertEqual([], response.json())
def tst_file_upload1(self):
data = {'file': SimpleUploadedFile('xxx.txt', b'x' * 4096)}
response = self.client.post(reverse('files-list'), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Link.objects.all().count(), 1)
self.assertEqual(File.objects.all().count(), 1)
with self.assertRaises(ValueError):
response.json()
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 1)
    def tst_file_upload2(self):
        """Duplicate of the first file."""
data = {'file': SimpleUploadedFile('qqq.txt', b'x' * 4096)}
response = self.client.post(reverse('files-list'), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Link.objects.all().count(), 2)
self.assertEqual(File.objects.all().count(), 1)
self.assertEqual([['qqq', 'xxx.txt']], response.json())
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 1)
    def tst_file_upload3(self):
        """Duplicate of the first file, but one byte larger (same MD5)."""
data = {'file': SimpleUploadedFile('hhh.txt', b'x' * 4097)}
response = self.client.post(reverse('files-list'), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Link.objects.all().count(), 3)
self.assertEqual(File.objects.all().count(), 2)
with self.assertRaises(ValueError):
response.json()
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 2)
def tst_file_download1(self):
response = self.client.get(reverse('download',
kwargs={'pk': 1, 'name': 'any.ext'}))
self.assertEqual(response.status_code, 200)
    def tst_file_download2(self):
        """Anonymous download."""
self.client.cookies.load({'sessionid': ''})
response = self.client.get(reverse('download',
kwargs={'pk': 2, 'name': 'any.ext'}))
self.assertEqual(response.status_code, 200)
    def tst_file_download404(self):
        """Nonexistent file."""
response = self.client.get(reverse('download',
kwargs={'pk': 404, 'name': 'any.ext'}))
self.assertEqual(response.status_code, 404)
def tst_registration2(self):
data = {'username': 'hhh', 'password': 'hhh', 'is_register': 1}
response = self.client.post(reverse('index'), data)
self.assertEqual(response.status_code, 200)
self.assertTrue('sessionid' in response.cookies)
def tst_file_upload4(self):
data = {'file': SimpleUploadedFile('kkk.txt', b'q' * 4096)}
response = self.client.post(reverse('files-list'), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Link.objects.all().count(), 4)
self.assertEqual(File.objects.all().count(), 3)
with self.assertRaises(ValueError):
response.json()
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 3)
    def tst_file_upload5(self):
        """Duplicate of the first and second files."""
data = {'file': SimpleUploadedFile('mmm.txt', b'x' * 4096)}
response = self.client.post(reverse('files-list'), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Link.objects.all().count(), 5)
self.assertEqual(File.objects.all().count(), 3)
self.assertEqual([['qqq', 'xxx.txt'], ['qqq', 'qqq.txt']], response.json())
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 3)
    def tst_file_delete5(self):
        """Delete the duplicate of the first and second files."""
response = self.client.delete(reverse('files-detail', kwargs={'pk': 5}))
self.assertEqual(response.status_code, 204)
self.assertEqual(Link.objects.all().count(), 4)
self.assertEqual(File.objects.all().count(), 3)
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 3)
def tst_file_delete4(self):
response = self.client.delete(reverse('files-detail', kwargs={'pk': 4}))
self.assertEqual(response.status_code, 204)
self.assertEqual(Link.objects.all().count(), 3)
self.assertEqual(File.objects.all().count(), 2)
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 2)
    def tst_file_delete404(self):
        """Delete a file that does not exist."""
response = self.client.delete(reverse('files-detail', kwargs={'pk': 404}))
self.assertEqual(response.status_code, 404)
def tearDown(self):
for link in Link.objects.all():
link.delete()
self.assertEqual(Link.objects.all().count(), 0)
self.assertEqual(File.objects.all().count(), 0)
self.assertEqual(len(os.listdir(self.path)) - self.total_files, 0)
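    # Note (added for clarity): the tst_* helpers above are intentionally not
    # named test_*, so the test runner does not execute them in isolation;
    # test_files() below chains them in a fixed order because each scenario
    # builds on the files uploaded by the previous ones.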
def test_files(self):
self.tst_anonymous_user()
self.tst_authorization()
self.tst_registration1()
self.tst_authorized_user()
self.tst_file_upload1()
self.tst_file_upload2()
self.tst_file_upload3()
self.tst_file_download1()
self.tst_file_download2()
self.tst_file_download404()
self.tst_registration2()
self.tst_file_upload4()
self.tst_file_upload5()
self.tst_file_delete5()
self.tst_file_delete4()
self.tst_file_delete404()
| {
"content_hash": "0a94f907f2633b86b2b2d0ac571486c3",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 83,
"avg_line_length": 41.68862275449102,
"alnum_prop": 0.6192186153404194,
"repo_name": "chaos-soft/chocola",
"id": "a9efa01bf9c40a0ea21bf1a47a6fc7f67f30b53d",
"size": "7125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "970"
},
{
"name": "JavaScript",
"bytes": "4015"
},
{
"name": "Python",
"bytes": "29425"
}
],
"symlink_target": ""
} |
'''
Created on Oct 4, 2014
@author: theo
'''
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
from acacia.data.models import Project, MeetLocatie, TabGroup, KeyFigure
from acacia.data.views import ProjectDetailView
class HomeView(ProjectDetailView):
template_name = 'spaarwater_detail.html'
def get_object(self):
return get_object_or_404(Project,pk=1)
class DashGroupView(TemplateView):
template_name = 'dashgroup.html'
def get_context_data(self, **kwargs):
context = super(DashGroupView,self).get_context_data(**kwargs)
name = context.get('name')
page = int(self.request.GET.get('page', 1))
group = get_object_or_404(TabGroup, name__iexact=name)
dashboards =[p.dashboard for p in group.tabpage_set.order_by('order')]
context['group'] = group
page = min(page, len(dashboards))
if page > 0:
pages = list(group.pages())
context['title'] = 'Dashboard %s - %s' % (group.name, pages[page-1].name)
context['page'] = int(page)
context['dashboard'] = dashboards[page-1]
return context
class OverviewView(TemplateView):
template_name = 'overview.html'
def get_context_data(self, **kwargs):
context = super(OverviewView,self).get_context_data(**kwargs)
pk = context.get('pk',1)
locatie = get_object_or_404(MeetLocatie,pk=pk)
context['locatie'] = locatie
keys = KeyFigure.objects.filter(locatie=locatie)
for key in keys:
context[key.name] = key.value
return context
| {
"content_hash": "84b89bbe289eea6887cb9b19747d0994",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 85,
"avg_line_length": 33.89795918367347,
"alnum_prop": 0.6351595424443106,
"repo_name": "acaciawater/spaarwater",
"id": "10b9fa5ec0284097831b209e2a0d816d0a67829d",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spaarwater/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10404"
},
{
"name": "Python",
"bytes": "14477"
}
],
"symlink_target": ""
} |
import abc
import argparse
import os
import six
from stevedore import extension
from . import exceptions
_discovered_plugins = {}
def discover_auth_systems():
"""Discover the available auth-systems.
This won't take into account the old style auth-systems.
"""
global _discovered_plugins
_discovered_plugins = {}
def add_plugin(ext):
_discovered_plugins[ext.name] = ext.plugin
ep_namespace = "dhclient.openstack.common.apiclient.auth"
mgr = extension.ExtensionManager(ep_namespace)
mgr.map(add_plugin)
def load_auth_system_opts(parser):
"""Load options needed by the available auth-systems into a parser.
This function will try to populate the parser with options from the
available plugins.
"""
group = parser.add_argument_group("Common auth options")
BaseAuthPlugin.add_common_opts(group)
for name, auth_plugin in six.iteritems(_discovered_plugins):
group = parser.add_argument_group(
"Auth-system '%s' options" % name,
conflict_handler="resolve")
auth_plugin.add_opts(group)
def load_plugin(auth_system):
try:
plugin_class = _discovered_plugins[auth_system]
except KeyError:
raise exceptions.AuthSystemNotFound(auth_system)
return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
"""Load required plugin and populate it with options.
Try to guess auth system if it is not specified. Systems are tried in
alphabetical order.
:type args: argparse.Namespace
:raises: AuthPluginOptionsMissing
"""
auth_system = args.os_auth_system
if auth_system:
plugin = load_plugin(auth_system)
plugin.parse_opts(args)
plugin.sufficient_options()
return plugin
for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)):
plugin_class = _discovered_plugins[plugin_auth_system]
plugin = plugin_class()
plugin.parse_opts(args)
try:
plugin.sufficient_options()
except exceptions.AuthPluginOptionsMissing:
continue
return plugin
raise exceptions.AuthPluginOptionsMissing(["auth_system"])
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
"""Base class for authentication plugins.
An authentication plugin needs to override at least the authenticate
method to be a valid plugin.
"""
auth_system = None
opt_names = []
common_opt_names = [
"auth_system",
"username",
"password",
"tenant_name",
"token",
"auth_url",
]
def __init__(self, auth_system=None, **kwargs):
self.auth_system = auth_system or self.auth_system
self.opts = dict((name, kwargs.get(name))
for name in self.opt_names)
@staticmethod
def _parser_add_opt(parser, opt):
"""Add an option to parser in two variants.
:param opt: option name (with underscores)
"""
dashed_opt = opt.replace("_", "-")
env_var = "OS_%s" % opt.upper()
arg_default = os.environ.get(env_var, "")
arg_help = "Defaults to env[%s]." % env_var
parser.add_argument(
"--os-%s" % dashed_opt,
metavar="<%s>" % dashed_opt,
default=arg_default,
help=arg_help)
parser.add_argument(
"--os_%s" % opt,
metavar="<%s>" % dashed_opt,
help=argparse.SUPPRESS)
@classmethod
def add_opts(cls, parser):
"""Populate the parser with the options for this plugin.
"""
for opt in cls.opt_names:
# use `BaseAuthPlugin.common_opt_names` since it is never
# changed in child classes
if opt not in BaseAuthPlugin.common_opt_names:
cls._parser_add_opt(parser, opt)
@classmethod
def add_common_opts(cls, parser):
"""Add options that are common for several plugins.
"""
for opt in cls.common_opt_names:
cls._parser_add_opt(parser, opt)
@staticmethod
def get_opt(opt_name, args):
"""Return option name and value.
:param opt_name: name of the option, e.g., "username"
:param args: parsed arguments
"""
return (opt_name, getattr(args, "os_%s" % opt_name, None))
def parse_opts(self, args):
"""Parse the actual auth-system options if any.
This method is expected to populate the attribute `self.opts` with a
dict containing the options and values needed to make authentication.
"""
self.opts.update(dict(self.get_opt(opt_name, args)
for opt_name in self.opt_names))
def authenticate(self, http_client):
"""Authenticate using plugin defined method.
The method usually analyses `self.opts` and performs
a request to authentication server.
:param http_client: client object that needs authentication
:type http_client: HTTPClient
:raises: AuthorizationFailure
"""
self.sufficient_options()
self._do_authenticate(http_client)
@abc.abstractmethod
def _do_authenticate(self, http_client):
"""Protected method for authentication.
"""
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
missing = [opt
for opt in self.opt_names
if not self.opts.get(opt)]
if missing:
raise exceptions.AuthPluginOptionsMissing(missing)
@abc.abstractmethod
def token_and_endpoint(self, endpoint_type, service_type):
"""Return token and endpoint.
:param service_type: Service type of the endpoint
:type service_type: string
:param endpoint_type: Type of endpoint.
Possible values: public or publicURL,
internal or internalURL,
admin or adminURL
:type endpoint_type: string
:returns: tuple of token and endpoint strings
:raises: EndpointException
"""
| {
"content_hash": "c3ff2a91953e36d688fc9b816dac6dd5",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 77,
"avg_line_length": 30.801980198019802,
"alnum_prop": 0.6133076181292189,
"repo_name": "nttcom/eclcli",
"id": "2168b59c5929f6951b2bb2dfa5b613a2e8529315",
"size": "6222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eclcli/dh/dhclient/common/apiclient/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2087533"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.billing import BillingManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-billing
# USAGE
python product.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = BillingManagementClient(
credential=DefaultAzureCredential(),
subscription_id="SUBSCRIPTION_ID",
)
response = client.products.get(
billing_account_name="{billingAccountName}",
product_name="{productName}",
)
print(response)
# x-ms-original-file: specification/billing/resource-manager/Microsoft.Billing/stable/2020-05-01/examples/Product.json
if __name__ == "__main__":
main()
| {
"content_hash": "ec4af532660238e8cd2a7d0f7678368a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 118,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.7249283667621776,
"repo_name": "Azure/azure-sdk-for-python",
"id": "cfb1f1eb3cd8093f5d4b71b0c0b53b57687c406c",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/billing/azure-mgmt-billing/generated_samples/product.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
byceps.blueprints.admin.shop.storefront.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, request
from flask_babel import gettext
from .....services.brand import service as brand_service
from .....services.shop.catalog import service as catalog_service
from .....services.shop.order import sequence_service as order_sequence_service
from .....services.shop.shop import service as shop_service
from .....services.shop.shop.transfer.models import Shop, ShopID
from .....services.shop.storefront import service as storefront_service
from .....services.shop.storefront.transfer.models import (
Storefront,
StorefrontID,
)
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_error, flash_success
from .....util.framework.templating import templated
from .....util.views import permission_required, redirect_to
from .forms import StorefrontCreateForm, StorefrontUpdateForm
blueprint = create_blueprint('shop_storefront_admin', __name__)
@blueprint.get('/for_shop/<shop_id>')
@permission_required('shop.view')
@templated
def index_for_shop(shop_id):
"""List storefronts for that shop."""
shop = _get_shop_or_404(shop_id)
brand = brand_service.get_brand(shop.brand_id)
storefronts = storefront_service.get_storefronts_for_shop(shop.id)
order_number_prefixes_by_sequence_id = (
_get_order_number_prefixes_by_sequence_id(storefronts, shop.id)
)
return {
'shop': shop,
'brand': brand,
'storefronts': storefronts,
'order_number_prefixes_by_sequence_id': order_number_prefixes_by_sequence_id,
}
def _get_order_number_prefixes_by_sequence_id(storefronts, shop_id):
sequence_ids = {sf.order_number_sequence_id for sf in storefronts}
sequences = order_sequence_service.get_order_number_sequences_for_shop(
shop_id
)
return {seq.id: seq.prefix for seq in sequences}
@blueprint.get('/<storefront_id>')
@permission_required('shop.view')
@templated
def view(storefront_id):
"""Show a single storefront."""
storefront = _get_storefront_or_404(storefront_id)
shop = shop_service.get_shop(storefront.shop_id)
brand = brand_service.get_brand(shop.brand_id)
order_number_sequence = order_sequence_service.get_order_number_sequence(
storefront.order_number_sequence_id
)
order_number_prefix = order_number_sequence.prefix
return {
'storefront': storefront,
'shop': shop,
'brand': brand,
'order_number_prefix': order_number_prefix,
}
@blueprint.get('/for_shop/<shop_id>/create')
@permission_required('shop.create')
@templated
def create_form(shop_id, erroneous_form=None):
"""Show form to create a storefront."""
shop = _get_shop_or_404(shop_id)
brand = brand_service.get_brand(shop.brand_id)
catalogs = catalog_service.get_catalogs_for_shop(shop.id)
order_number_sequences = (
order_sequence_service.get_order_number_sequences_for_shop(shop.id)
)
order_number_sequence_available = bool(order_number_sequences)
form = erroneous_form if erroneous_form else StorefrontCreateForm()
form.set_catalog_choices(catalogs)
form.set_order_number_sequence_choices(order_number_sequences)
return {
'shop': shop,
'brand': brand,
'order_number_sequence_available': order_number_sequence_available,
'form': form,
}
@blueprint.post('/for_shop/<shop_id>')
@permission_required('shop.create')
def create(shop_id):
"""Create a storefront."""
shop = _get_shop_or_404(shop_id)
form = StorefrontCreateForm(request.form)
catalogs = catalog_service.get_catalogs_for_shop(shop.id)
order_number_sequences = (
order_sequence_service.get_order_number_sequences_for_shop(shop.id)
)
if not order_number_sequences:
flash_error(
gettext('No order number sequences are defined for this shop.')
)
return create_form(shop_id, form)
form.set_catalog_choices(catalogs)
form.set_order_number_sequence_choices(order_number_sequences)
if not form.validate():
return create_form(shop_id, form)
storefront_id = form.id.data.strip()
catalog_id = form.catalog_id.data or None
order_number_sequence_id = form.order_number_sequence_id.data
if not order_number_sequence_id:
flash_error(gettext('No valid order number sequence was specified.'))
return create_form(shop_id, form)
order_number_sequence = order_sequence_service.get_order_number_sequence(
order_number_sequence_id
)
if order_number_sequence.shop_id != shop.id:
flash_error(gettext('No valid order number sequence was specified.'))
return create_form(shop_id, form)
try:
item_number = order_sequence_service.generate_order_number(
order_number_sequence.id
)
except order_sequence_service.OrderNumberGenerationFailed as e:
abort(500, e.message)
storefront = storefront_service.create_storefront(
storefront_id,
shop.id,
order_number_sequence.id,
closed=False,
catalog_id=catalog_id,
)
flash_success(
gettext(
'Storefront "%(storefront_id)s" has been created.',
storefront_id=storefront.id,
)
)
return redirect_to('.view', storefront_id=storefront.id)
@blueprint.get('/<storefront_id>/update')
@permission_required('shop.update')
@templated
def update_form(storefront_id, erroneous_form=None):
"""Show form to update a storefront."""
storefront = _get_storefront_or_404(storefront_id)
shop = shop_service.get_shop(storefront.shop_id)
brand = brand_service.get_brand(shop.brand_id)
catalogs = catalog_service.get_catalogs_for_shop(storefront.shop_id)
order_number_sequences = (
order_sequence_service.get_order_number_sequences_for_shop(shop.id)
)
form = (
erroneous_form
if erroneous_form
else StorefrontUpdateForm(obj=storefront)
)
form.set_catalog_choices(catalogs)
form.set_order_number_sequence_choices(order_number_sequences)
return {
'storefront': storefront,
'shop': shop,
'brand': brand,
'form': form,
}
@blueprint.post('/<storefront_id>')
@permission_required('shop.update')
def update(storefront_id):
"""Update a storefront."""
storefront = _get_storefront_or_404(storefront_id)
catalogs = catalog_service.get_catalogs_for_shop(storefront.shop_id)
order_number_sequences = (
order_sequence_service.get_order_number_sequences_for_shop(
storefront.shop_id
)
)
form = StorefrontUpdateForm(request.form)
form.set_catalog_choices(catalogs)
form.set_order_number_sequence_choices(order_number_sequences)
if not form.validate():
return update_form(storefront_id, form)
order_number_sequence_id = form.order_number_sequence_id.data
catalog_id = form.catalog_id.data or None
closed = form.closed.data
storefront = storefront_service.update_storefront(
storefront.id, catalog_id, order_number_sequence_id, closed
)
flash_success(
gettext(
'Storefront "%(storefront_id)s" has been updated.',
storefront_id=storefront.id,
)
)
return redirect_to('.view', storefront_id=storefront.id)
def _get_shop_or_404(shop_id: ShopID) -> Shop:
shop = shop_service.find_shop(shop_id)
if shop is None:
abort(404)
return shop
def _get_storefront_or_404(storefront_id: StorefrontID) -> Storefront:
storefront = storefront_service.find_storefront(storefront_id)
if storefront is None:
abort(404)
return storefront
| {
"content_hash": "f30ba531e3a78613244b44e6dffc51db",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 85,
"avg_line_length": 30.279693486590038,
"alnum_prop": 0.6765785144881691,
"repo_name": "homeworkprod/byceps",
"id": "7e2e7ef02831016d70763a372f6b6a2a23c6b889",
"size": "7903",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "byceps/blueprints/admin/shop/storefront/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
} |
import redis
import gaia.config as cfg
config = cfg.load_config('app.yaml') or cfg.load_config('redis.yaml') or cfg.load_config('redis.json') or cfg.load_config('redis.cfg')
client = redis.Redis(**config['redis'])
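# Illustrative sketch (assumption): the config files above are not shown here,
# but the call to redis.Redis(**config['redis']) implies a mapping with a
# 'redis' section whose keys are redis.Redis() keyword arguments, e.g. in YAML:
#   redis:
#     host: localhost
#     port: 6379
#     db: 0
# The equivalent inline structure, with made-up values:
example_config = {'redis': {'host': 'localhost', 'port': 6379, 'db': 0}}
example_client = redis.Redis(**example_config['redis'])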
| {
"content_hash": "5ba184112c2faf1b053b9cd453df253d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 134,
"avg_line_length": 31,
"alnum_prop": 0.7188940092165899,
"repo_name": "caiyunapp/gaiabase",
"id": "00182f9eeae3395dbb350143936067d18655ac3d",
"size": "242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gaia/db/redis_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7780"
}
],
"symlink_target": ""
} |
"""HTTP authentication-related tests."""
import requests
import pytest
from utils import http, add_auth, HTTP_OK, TestEnvironment
import httpie.input
import httpie.cli
class TestAuth:
def test_basic_auth(self, httpbin):
r = http('--auth=user:password',
'GET', httpbin.url + '/basic-auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
@pytest.mark.skipif(
requests.__version__ == '0.13.6',
reason='Redirects with prefetch=False are broken in Requests 0.13.6')
def test_digest_auth(self, httpbin):
r = http('--auth-type=digest', '--auth=user:password',
'GET', httpbin.url + '/digest-auth/auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_password_prompt(self, httpbin):
httpie.input.AuthCredentials._getpass = lambda self, prompt: 'password'
r = http('--auth', 'user',
'GET', httpbin.url + '/basic-auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_credentials_in_url(self, httpbin):
url = add_auth(httpbin.url + '/basic-auth/user/password',
auth='user:password')
r = http('GET', url)
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_credentials_in_url_auth_flag_has_priority(self, httpbin):
"""When credentials are passed in URL and via -a at the same time,
then the ones from -a are used."""
url = add_auth(httpbin.url + '/basic-auth/user/password',
auth='user:wrong')
r = http('--auth=user:password', 'GET', url)
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
@pytest.mark.parametrize('url', [
'[email protected]',
'username:@example.org',
])
def test_only_username_in_url(self, url):
"""
https://github.com/jakubroztocil/httpie/issues/242
"""
args = httpie.cli.parser.parse_args(args=[url], env=TestEnvironment())
assert args.auth
assert args.auth.key == 'username'
assert args.auth.value == ''
| {
"content_hash": "77ac182c7afe39ae0898ca4bc084be98",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 37.38709677419355,
"alnum_prop": 0.588869715271786,
"repo_name": "Irdroid/httpie",
"id": "5a94ad94098d2bca5042742151fb8a1aa0051396",
"size": "2318",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/test_auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import annotations
import inspect
from typing import (
Callable,
Hashable,
)
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._typing import (
Dtype,
npt,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_scalar,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
class NumericIndex(Index):
"""
Immutable numeric sequence used for indexing and alignment.
The basic object storing axis labels for all pandas objects.
NumericIndex is a special case of `Index` with purely numpy int/uint/float labels.
.. versionadded:: 1.4.0
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: None)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of purely int64 labels (deprecated).
UInt64Index : Index of purely uint64 labels (deprecated).
Float64Index : Index of purely float64 labels (deprecated).
Notes
-----
    A NumericIndex instance can **only** contain numpy int64/32/16/8, uint64/32/16/8 or
    float64/32/16 dtype. In particular, ``NumericIndex`` *cannot* hold Pandas numeric
    dtypes (:class:`Int64Dtype`, :class:`Int32Dtype` etc.).
"""
_typ = "numericindex"
_values: np.ndarray
_default_dtype: np.dtype | None = None
_dtype_validation_metadata: tuple[Callable[..., bool], str] = (
is_numeric_dtype,
"numeric type",
)
_is_numeric_dtype = True
_can_hold_strings = False
_is_backward_compat_public_numeric_index: bool = True
_engine_types: dict[np.dtype, type[libindex.IndexEngine]] = {
np.dtype(np.int8): libindex.Int8Engine,
np.dtype(np.int16): libindex.Int16Engine,
np.dtype(np.int32): libindex.Int32Engine,
np.dtype(np.int64): libindex.Int64Engine,
np.dtype(np.uint8): libindex.UInt8Engine,
np.dtype(np.uint16): libindex.UInt16Engine,
np.dtype(np.uint32): libindex.UInt32Engine,
np.dtype(np.uint64): libindex.UInt64Engine,
np.dtype(np.float32): libindex.Float32Engine,
np.dtype(np.float64): libindex.Float64Engine,
np.dtype(np.complex64): libindex.Complex64Engine,
np.dtype(np.complex128): libindex.Complex128Engine,
}
@property
def _engine_type(self) -> type[libindex.IndexEngine]:
# error: Invalid index type "Union[dtype[Any], ExtensionDtype]" for
# "Dict[dtype[Any], Type[IndexEngine]]"; expected type "dtype[Any]"
return self._engine_types[self.dtype] # type: ignore[index]
@cache_readonly
def inferred_type(self) -> str:
return {
"i": "integer",
"u": "integer",
"f": "floating",
"c": "complex",
}[self.dtype.kind]
def __new__(
cls, data=None, dtype: Dtype | None = None, copy=False, name=None
) -> NumericIndex:
name = maybe_extract_name(name, data, cls)
subarr = cls._ensure_array(data, dtype, copy)
return cls._simple_new(subarr, name=name)
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
cls._validate_dtype(dtype)
if not isinstance(data, (np.ndarray, Index)):
# Coerce to ndarray if not already ndarray or Index
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
orig = data
data = np.asarray(data, dtype=dtype)
if dtype is None and data.dtype.kind == "f":
if cls is UInt64Index and (data >= 0).all():
# https://github.com/numpy/numpy/issues/19146
data = np.asarray(orig, dtype=np.uint64)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
dtype = cls._ensure_dtype(dtype)
if copy or not is_dtype_equal(data.dtype, dtype):
# TODO: the try/except below is because it's difficult to predict the error
# and/or error message from different combinations of data and dtype.
# Efforts to avoid this try/except welcome.
# See https://github.com/pandas-dev/pandas/pull/41153#discussion_r676206222
try:
subarr = np.array(data, dtype=dtype, copy=copy)
cls._validate_dtype(subarr.dtype)
except (TypeError, ValueError):
raise ValueError(f"data is not compatible with {cls.__name__}")
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
subarr = np.asarray(subarr)
return subarr
@classmethod
def _validate_dtype(cls, dtype: Dtype | None) -> None:
if dtype is None:
return
validation_func, expected = cls._dtype_validation_metadata
if not validation_func(dtype):
raise ValueError(
f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
)
@classmethod
def _ensure_dtype(cls, dtype: Dtype | None) -> np.dtype | None:
"""
Ensure int64 dtype for Int64Index etc. but allow int32 etc. for NumericIndex.
Assumes dtype has already been validated.
"""
if dtype is None:
return cls._default_dtype
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if cls._is_backward_compat_public_numeric_index:
# dtype for NumericIndex
return dtype
else:
# dtype for Int64Index, UInt64Index etc. Needed for backwards compat.
return cls._default_dtype
# ----------------------------------------------------------------
# Indexing Methods
# error: Decorated property not supported
@cache_readonly # type: ignore[misc]
@doc(Index._should_fallback_to_positional)
def _should_fallback_to_positional(self) -> bool:
return False
@doc(Index._convert_slice_indexer)
def _convert_slice_indexer(self, key: slice, kind: str, is_frame: bool = False):
# TODO(2.0): once #45324 deprecation is enforced we should be able
# to simplify this.
if is_float_dtype(self.dtype):
assert kind in ["loc", "getitem"]
# TODO: can we write this as a condition based on
# e.g. _should_fallback_to_positional?
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
return super()._convert_slice_indexer(key, kind=kind, is_frame=is_frame)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
# ----------------------------------------------------------------
@doc(Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = lib.no_default):
if not self._can_hold_na and values.dtype.kind == "f":
name = self._name if name is lib.no_default else name
# Ensure we are not returning an Int64Index with float data:
return Float64Index._simple_new(values, name=name)
return super()._shallow_copy(values=values, name=name)
def _convert_tolerance(self, tolerance, target):
tolerance = super()._convert_tolerance(tolerance, target)
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} must contain "
"numeric elements if it is list type"
)
else:
raise ValueError(
f"tolerance argument for {type(self).__name__} must be numeric "
f"if it is a scalar: {repr(tolerance)}"
)
return tolerance
@classmethod
def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None:
"""
Ensure incoming data can be represented with matching signed-ness.
Needed if the process of casting data from some accepted dtype to the internal
dtype(s) bears the risk of truncation (e.g. float to int).
"""
if is_integer_dtype(subarr.dtype):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
def _format_native_types(
self, *, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import FloatArrayFormatter
if is_float_dtype(self.dtype):
formatter = FloatArrayFormatter(
self._values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
return super()._format_native_types(
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
**kwargs,
)
_num_index_shared_docs = {}
_num_index_shared_docs[
"class_descr"
] = """
Immutable sequence used for indexing and alignment.
.. deprecated:: 1.4.0
In pandas v2.0 %(klass)s will be removed and :class:`NumericIndex` used instead.
%(klass)s will remain fully functional for the duration of pandas 1.x.
The basic object storing axis labels for all pandas objects.
%(klass)s is a special case of `Index` with purely %(ltype)s labels. %(extra)s.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
NumericIndex : Index of numpy int/uint/float data.
Notes
-----
An Index instance can **only** contain hashable objects.
"""
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index, UInt64Index.
"""
_is_backward_compat_public_numeric_index: bool = False
@property
def asi8(self) -> npt.NDArray[np.int64]:
# do not cache or you'll create a memory leak
warnings.warn(
"Index.asi8 is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
return self._values.view(self._default_dtype)
class Int64Index(IntegerIndex):
_index_descr_args = {
"klass": "Int64Index",
"ltype": "integer",
"dtype": "int64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "int64index"
_default_dtype = np.dtype(np.int64)
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
@property
def _engine_type(self) -> type[libindex.Int64Engine]:
return libindex.Int64Engine
class UInt64Index(IntegerIndex):
_index_descr_args = {
"klass": "UInt64Index",
"ltype": "unsigned integer",
"dtype": "uint64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "uint64index"
_default_dtype = np.dtype(np.uint64)
_dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer")
@property
def _engine_type(self) -> type[libindex.UInt64Engine]:
return libindex.UInt64Engine
class Float64Index(NumericIndex):
_index_descr_args = {
"klass": "Float64Index",
"dtype": "float64",
"ltype": "float",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "float64index"
_default_dtype = np.dtype(np.float64)
_dtype_validation_metadata = (is_float_dtype, "float")
_is_backward_compat_public_numeric_index: bool = False
@property
def _engine_type(self) -> type[libindex.Float64Engine]:
return libindex.Float64Engine
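# Illustrative sketch (not part of pandas): the classes defined above are in
# scope here, so the dtype behaviour they implement can be shown directly.
# NumericIndex keeps the requested numpy dtype, while the legacy Int64Index
# always coerces to its 64-bit default (see _ensure_dtype above).
if __name__ == "__main__":
    idx32 = NumericIndex([1, 2, 3], dtype="int32")
    legacy = Int64Index([1, 2, 3])
    print(idx32.dtype)   # int32
    print(legacy.dtype)  # int64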
| {
"content_hash": "accf259a9fc6544c24773911c1b8962b",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 88,
"avg_line_length": 31.831353919239906,
"alnum_prop": 0.5959256771882695,
"repo_name": "datapythonista/pandas",
"id": "d114fe47fa0f1aa2e965979478111cf715e97360",
"size": "13401",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/core/indexes/numeric.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
'''A trivial python program with experimental hacks to demonstrate usage of Git'''
#I don't know Python very well so this might break things
for i in range(0,10):
print(i)
print("hello, world, I can count!") #official K&$ style hello world
| {
"content_hash": "89d225103f3c4aaae57a8521fe2ddd70",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 82,
"avg_line_length": 30.75,
"alnum_prop": 0.7235772357723578,
"repo_name": "rgmerk/version-control-example-Monash",
"id": "ee55a443b5e2eed5e9475eb8275ba06adcb59bd5",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/helloworld.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "266"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def _get_connections_from_db(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
log = LoggingMixin().log
log.info("Using connection to: %s", conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
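# Illustrative sketch (assumption, not part of Airflow): a concrete hook
# usually overrides get_conn() and builds a client from the Connection object
# returned by get_connection(). The class name and conn_id below are made up.
class ExampleHook(BaseHook):
    def __init__(self, example_conn_id='example_default'):
        super(ExampleHook, self).__init__(source=None)
        self.example_conn_id = example_conn_id
    def get_conn(self):
        conn = self.get_connection(self.example_conn_id)
        # A real hook would construct and return a client here using
        # conn.host, conn.login, conn.password and so on.
        return conn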
| {
"content_hash": "1078a4e5294ab9a46da513283822abb9",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 28.36904761904762,
"alnum_prop": 0.6311372219890894,
"repo_name": "janczak10/incubator-airflow",
"id": "92313ca2671307786e0b6ed7a24c55c2781fc95f",
"size": "2950",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "airflow/hooks/base_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57033"
},
{
"name": "HTML",
"bytes": "151780"
},
{
"name": "JavaScript",
"bytes": "1364376"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2469512"
},
{
"name": "Shell",
"bytes": "21140"
}
],
"symlink_target": ""
} |
"""Base bolt for integration tests"""
import copy
from heron.common.src.python.utils.log import Log
from heronpy.api.bolt.bolt import Bolt
from heronpy.api.stream import Stream
from heronpy.api.component.component_spec import HeronComponentSpec
import heron.common.src.python.pex_loader as pex_loader
from ..core import constants as integ_const
from .batch_bolt import BatchBolt
# pylint: disable=missing-docstring
class IntegrationTestBolt(Bolt):
"""Base bolt for integration test
  Every bolt in an integration test topology is wrapped by an instance of this class, which delegates to the user's bolt.
"""
outputs = [Stream(fields=[integ_const.INTEGRATION_TEST_TERMINAL],
name=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)]
@classmethod
def spec(cls, name, par, inputs, config, user_bolt_classpath, user_output_fields=None):
python_class_path = f"{cls.__module__}.{cls.__name__}"
config[integ_const.USER_BOLT_CLASSPATH] = user_bolt_classpath
# avoid modification to cls.outputs
_outputs = copy.copy(cls.outputs)
if user_output_fields is not None:
_outputs.extend(user_output_fields)
return HeronComponentSpec(name, python_class_path, is_spout=False, par=par,
inputs=inputs, outputs=_outputs, config=config)
def initialize(self, config, context):
user_bolt_classpath = config.get(integ_const.USER_BOLT_CLASSPATH, None)
if user_bolt_classpath is None:
raise RuntimeError("User defined integration bolt was not found")
user_bolt_cls = self._load_user_bolt(context.get_topology_pex_path(), user_bolt_classpath)
self.user_bolt = user_bolt_cls(delegate=self)
upstream_components = set()
self.terminal_to_receive = 0
for streamId in list(context.get_this_sources().keys()):
# streamId is topology_pb2.StreamId protobuf message
upstream_components.add(streamId.component_name)
for comp_name in upstream_components:
self.terminal_to_receive += len(context.get_component_tasks(comp_name))
self.tuple_received = 0
self.tuples_processed = 0
self.current_tuple_processing = None
Log.info("Terminals to receive: %d", self.terminal_to_receive)
self.user_bolt.initialize(config, context)
@staticmethod
def _load_user_bolt(pex_file, classpath):
pex_loader.load_pex(pex_file)
cls = pex_loader.import_and_get_class(pex_file, classpath)
return cls
@property
def is_done(self):
return self.terminal_to_receive == 0
def process(self, tup):
self.tuple_received += 1
stream_id = tup.stream
Log.info("Received a tuple: %s from %s", (tup, stream_id))
if stream_id == integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID:
self.terminal_to_receive -= 1
if self.is_done:
if isinstance(self.user_bolt, BatchBolt):
Log.info("Invoke bolt to do finish batch")
self.user_bolt.finish_batch()
Log.info("Populating the terminals to downstream")
super().emit(
[integ_const.INTEGRATION_TEST_TERMINAL],
stream=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)
else:
self.current_tuple_processing = tup
self.user_bolt.process(tup)
self.ack(tup)
def emit(self, tup, stream=Stream.DEFAULT_STREAM_ID, anchors=None,
direct_task=None, need_task_ids=False):
Log.info("emitting tuple: %s", tup)
if tup is None:
super().emit(list(self.current_tuple_processing),
stream=stream, anchors=anchors,
direct_task=direct_task, need_task_ids=need_task_ids)
else:
super().emit(tup, stream, anchors, direct_task, need_task_ids)
def ack(self, tup):
Log.info("Trying to do an ack. tuples processed: %d, received: %d",
self.tuples_processed, self.tuple_received)
if self.tuples_processed < self.tuple_received:
super().ack(tup)
self.tuples_processed += 1
def fail(self, tup):
Log.info("Trying to do a fail. tuples processed: %d, received: %d",
self.tuples_processed, self.tuple_received)
if self.tuples_processed < self.tuple_received:
super().fail(tup)
self.tuples_processed += 1
def process_tick(self, tup):
self.user_bolt.process_tick(tup)
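# Illustrative sketch (all names below are placeholders, not heron test code):
# a test topology would wire this wrapper in through spec(), pointing
# user_bolt_classpath at the bolt under test, for example:
#   bolt_spec = IntegrationTestBolt.spec(
#       name="test_word_count",
#       par=1,
#       inputs={spout_spec: Grouping.SHUFFLE},
#       config={},
#       user_bolt_classpath="mytests.bolts.WordCountBolt",
#       user_output_fields=[Stream(fields=["word", "count"])],
#   )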
| {
"content_hash": "e5cdd2ab0b24f4d00d49a505967f3199",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 97,
"avg_line_length": 38.567567567567565,
"alnum_prop": 0.6757766876897922,
"repo_name": "twitter/heron",
"id": "89a06b58fc10e98af68dae3ee8524df4f6976eec",
"size": "5131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integration_test/src/python/integration_test/core/integration_test_bolt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11709"
},
{
"name": "C++",
"bytes": "1623082"
},
{
"name": "CSS",
"bytes": "109554"
},
{
"name": "HCL",
"bytes": "2115"
},
{
"name": "HTML",
"bytes": "156820"
},
{
"name": "Java",
"bytes": "4466689"
},
{
"name": "JavaScript",
"bytes": "1111202"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "781"
},
{
"name": "Objective-C",
"bytes": "1929"
},
{
"name": "Python",
"bytes": "1537910"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "72781"
},
{
"name": "Shell",
"bytes": "166876"
},
{
"name": "Smarty",
"bytes": "528"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
} |
from nose.tools import assert_true, assert_false, assert_raises
try:
from nose.tools import assert_is_instance, assert_dict_equal
except ImportError:
from landlab.testing.tools import assert_is_instance, assert_dict_equal
from six import StringIO
from landlab.core import load_params
from landlab.testing.tools import cdtemp
YAML_PARAMS_STR = """
x: 1e7
y: 1
z: [1, 2]
a: frog
"""
MPD_PARAMS_STR = """
x: A float
1e7
y: An int
1
a: A string
frog
"""
YAML_PARAMS = {
'x': 1e7,
'y': 1,
'z': [1, 2],
'a': 'frog',
}
MPD_PARAMS = {
'x': 1e7,
'y': 1,
'a': 'frog',
}
def test_from_yaml_string():
"""Load parameters from YAML-formatted string."""
params = load_params(YAML_PARAMS_STR)
assert_dict_equal(params, YAML_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
def test_from_yaml_file_like():
"""Load parameters from YAML-formatted string."""
params = load_params(StringIO(YAML_PARAMS_STR))
assert_dict_equal(params, YAML_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
def test_from_yaml_path():
"""Load parameters from YAML-formatted string."""
with cdtemp() as dir:
with open('params.yaml', 'w') as fp:
fp.write(YAML_PARAMS_STR)
params = load_params('./params.yaml')
assert_dict_equal(params, YAML_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
def test_from_mpd_string():
"""Load parameters from YAML-formatted string."""
params = load_params(MPD_PARAMS_STR)
assert_dict_equal(params, MPD_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
def test_from_mpd_file_like():
    """Load parameters from an MPD-formatted file-like object."""
params = load_params(StringIO(MPD_PARAMS_STR))
assert_dict_equal(params, MPD_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
def test_from_mpd_path():
    """Load parameters from an MPD-formatted file."""
with cdtemp() as dir:
with open('params.txt', 'w') as fp:
fp.write(MPD_PARAMS_STR)
params = load_params('./params.txt')
assert_dict_equal(params, MPD_PARAMS)
assert_is_instance(params['x'], float)
assert_is_instance(params['y'], int)
| {
"content_hash": "444f5af1cdae0bd98fb6d6e4d7630692",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 75,
"avg_line_length": 26.244444444444444,
"alnum_prop": 0.6477561388653683,
"repo_name": "RondaStrauch/landlab",
"id": "50213e75d04d33423d1f30edb4ee6c57df48f915",
"size": "2362",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "landlab/core/tests/test_load_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "HTML",
"bytes": "99948"
},
{
"name": "Jupyter Notebook",
"bytes": "13921"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "3447076"
},
{
"name": "Shell",
"bytes": "2370"
},
{
"name": "TeX",
"bytes": "64170"
}
],
"symlink_target": ""
} |
"""
Demonstrates fetching id3 information for a song
Note: sudo pip install mutagen
eg: ./020-id3.py "data/06 Cliantro Vision.mp3"
"""
import argparse
from mutagen.easyid3 import EasyID3
def main():
parser = argparse.ArgumentParser(description='Download some videos')
parser.add_argument('song', type=str, nargs=1, help='The song to read id3 tags from')
args = parser.parse_args()
audio = EasyID3(args.song[0])
print "Album: "+audio["album"][0]
print "Artist: "+audio["artist"][0]
print "Title: "+audio["title"][0]
if __name__ == '__main__':
main() | {
"content_hash": "ea4ca42404e95d1e063b18b442373b0e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.6890459363957597,
"repo_name": "KitchenTableCoders/cli-video",
"id": "cd33a03f0fb4f5c24b3a3ac67c7318b9428ffd34",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "020-id3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29046"
}
],
"symlink_target": ""
} |
"""
Class for grids of the two components of the horizontal gradient.
"""
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import copy as _copy
import xarray as _xr
from .shgrid import SHGrid as _SHGrid
class SHGradient(object):
"""
Class for grids of the two components of the horizontal gradient of a
scalar function. The class is initialized from a class instance of
SHCoeffs using the method gradient().
Attributes:
theta : SHGrid class instance of the theta component of the
horizontal gradient.
phi : SHGrid class instance of the phi component of the
horizontal gradient.
units : The units of the gridded data.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the function
used in creating the grids.
nlat, nlon : The number of latitude and longitude bands in the grids.
n : The number of samples in latitude.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for 360 E
and the unnecessary row for 90 S.
Methods:
plot() : Plot the two components of the horizontal gradient.
plot_theta() : Plot the theta component of the horizontal gradient.
plot_phi() : Plot the phi component of the horizontal gradient.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHGravGrid
instance.
"""
def __init__(self, theta, phi, lmax, lmax_calc, units=None,
pot_units=None, epoch=None):
"""
Initialize the SHGradient class.
"""
self.theta = _SHGrid.from_array(theta, grid='DH', units=units)
self.phi = _SHGrid.from_array(phi, grid='DH', units=units)
self.grid = self.theta.grid
self.sampling = self.theta.sampling
self.nlat = self.theta.nlat
self.nlon = self.theta.nlon
self.n = self.theta.n
self.extend = self.theta.extend
self.lmax = lmax
self.lmax_calc = lmax_calc
        self.units = units
        self.epoch = epoch
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def info(self):
"""
Print a summary of the data stored in the SHGradient class instance.
Usage
-----
x.info()
"""
print(repr(self))
def __repr__(self):
str = ('grid = {:s}\n'
'nlat = {:d}\n'
'nlon = {:d}\n'
'n = {:d}\n'
'sampling = {:d}\n'
'extend = {}\n'
'lmax = {:d}\n'
'lmax_calc = {:d}\n'
'units = {:s}'
.format(self.grid, self.nlat, self.nlon, self.n, self.sampling,
self.extend, self.lmax, self.lmax_calc,
repr(self.units)))
return str
def plot_theta(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label='$\\theta$ component',
cb_tick_interval=None, grid=False, axes_labelsize=None,
tick_labelsize=None, show=True, ax=None, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
fname=None, cb_width=None):
"""
Plot the theta component of the horizontal gradient.
Usage
-----
x.plot_theta([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\\theta$ component'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
return self.theta.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
cb_width=cb_width,
show=show, fname=fname)
def plot_phi(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label='$\\phi$ component',
cb_tick_interval=None, grid=False, axes_labelsize=None,
tick_labelsize=None, show=True, ax=None, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_width=None, fname=None):
"""
Plot the phi component of the horizontal gradient.
Usage
-----
x.plot_phi([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\\phi$ component'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
return self.phi.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot(self, projection=None, tick_interval=[60, 30],
minor_tick_interval=[None, None], xlabel='Longitude',
ylabel='Latitude', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False, cb_triangles='neither',
cb_tick_interval=None, grid=False, axes_labelsize=9,
tick_labelsize=8, show=True, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_width=None, fname=None):
"""
Plot the two vector components of the horizontal gradient.
Usage
-----
x.plot([projection, tick_interval, minor_tick_interval, ticks, xlabel,
ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 0.4
else:
scale = 0.25
else:
scale = 0.3
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(1, 2, figsize=figsize)
self.plot_theta(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=None,
titlesize=None, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize, cb_offset=cb_offset,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, show=show, fname=None)
self.plot_phi(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=None,
titlesize=None, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize, cb_offset=cb_offset,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, show=show, fname=None)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
def to_xarray(self, title='', description='',
comment='pyshtools grid'):
"""
Return the horizontal gradient gridded data as an xarray DataSet.
Usage
-----
x.to_xarray([title, description, comment])
Parameters
----------
title : str, optional, default = ''
Title of the dataset.
description : str, optional, default = ''
Description of the dataset ('Remark' in gmt grd files).
comment : str, optional, default = 'pyshtools grid'
Additional information about how the data were generated.
"""
attrs = {'title': title,
'description': description,
'comment': comment,
'nlat': self.nlat,
'nlon': self.nlon,
'lmax': self.lmax,
'grid': self.grid,
'lmax_calc': self.lmax_calc,
'sampling': self.sampling,
'n': self.n,
'extend': repr(self.extend)
}
if self.epoch is not None:
attrs['epoch'] = self.epoch
_theta = self.theta.to_xarray(title='gradient (theta)',
long_name='theta component',
units=repr(self.units))
_phi = self.phi.to_xarray(title='gradient (phi)',
                                  long_name='phi component',
units=repr(self.units))
return _xr.Dataset({'theta': _theta, 'phi': _phi}, attrs=attrs)
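# Illustrative sketch (assumption, not part of this module): as stated in the
# class docstring, an SHGradient is normally produced by SHCoeffs.gradient().
# The degree, order and bandwidth chosen below are arbitrary.
if __name__ == "__main__":
    import pyshtools as pysh
    coeffs = pysh.SHCoeffs.from_zeros(lmax=16)
    coeffs.set_coeffs(values=[1.0], ls=[4], ms=[2])
    grad = coeffs.gradient()
    fig, ax = grad.plot(show=False)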
| {
"content_hash": "c67f98f11ff24448e6b0f8af6b121736",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 79,
"avg_line_length": 49.940677966101696,
"alnum_prop": 0.5664771763108774,
"repo_name": "MarkWieczorek/SHTOOLS",
"id": "f28392f2e8610e8d258959eb11c73d3e62fe5697",
"size": "23572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyshtools/shclasses/shgradient.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "47054"
},
{
"name": "Fortran",
"bytes": "1566213"
},
{
"name": "Makefile",
"bytes": "21071"
},
{
"name": "Python",
"bytes": "1442641"
}
],
"symlink_target": ""
} |
"""
Setup script for Diofant.
This script uses Setuptools (https://setuptools.readthedocs.io/en/latest/).
"""
import setuptools
setuptools.setup()
| {
"content_hash": "2cd967599f0c939edb9eebd7323989b9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 15.1,
"alnum_prop": 0.7417218543046358,
"repo_name": "skirpichev/omg",
"id": "fa06eee3402f8a5efa47fc993b9af1fe6d4c735c",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10305079"
}
],
"symlink_target": ""
} |