text | meta
---|---
stringlengths 4–1.02M | dict
import unittest
from unittest.mock import Mock
from airflow.models import TaskInstance
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
class TestDagTISlotsAvailableDep(unittest.TestCase):
def test_concurrency_reached(self):
"""
Test max_active_tasks reached should fail dep
"""
dag = Mock(concurrency=1, get_concurrency_reached=Mock(return_value=True))
task = Mock(dag=dag, pool_slots=1)
ti = TaskInstance(task, execution_date=None)
assert not DagTISlotsAvailableDep().is_met(ti=ti)
def test_all_conditions_met(self):
"""
Test all conditions met should pass dep
"""
dag = Mock(concurrency=1, get_concurrency_reached=Mock(return_value=False))
task = Mock(dag=dag, pool_slots=1)
ti = TaskInstance(task, execution_date=None)
assert DagTISlotsAvailableDep().is_met(ti=ti)
| {
"content_hash": "71e05c94fff1dbbecc974300ca71a4aa",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 83,
"avg_line_length": 34.407407407407405,
"alnum_prop": 0.682454251883746,
"repo_name": "apache/incubator-airflow",
"id": "9ae6a1daafa9bcc9d93f88afc35cdc6ab7abef7e",
"size": "1718",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tests/ti_deps/deps/test_dag_ti_slots_available_dep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
from PySide import QtCore, QtGui
class Ui_About(object):
def setupUi(self, About):
About.setObjectName("About")
About.setWindowModality(QtCore.Qt.ApplicationModal)
About.resize(480, 560)
About.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
About.setModal(False)
self.verticalLayout = QtGui.QVBoxLayout(About)
self.verticalLayout.setObjectName("verticalLayout")
self.about_tabs = QtGui.QTabWidget(About)
self.about_tabs.setTabShape(QtGui.QTabWidget.Rounded)
self.about_tabs.setObjectName("about_tabs")
self.info_tab = QtGui.QWidget()
self.info_tab.setObjectName("info_tab")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.info_tab)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.scrollArea_2 = QtGui.QScrollArea(self.info_tab)
self.scrollArea_2.setStyleSheet("QScrollArea {background-color:transparent;}")
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName("scrollArea_2")
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 454, 485))
self.scrollAreaWidgetContents_2.setStyleSheet("background-color:transparent;")
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.verticalLayout_6 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_2)
self.verticalLayout_6.setContentsMargins(6, 0, 6, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.text_lbl = QtGui.QLabel(self.scrollAreaWidgetContents_2)
self.text_lbl.setText("")
self.text_lbl.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.text_lbl.setWordWrap(True)
self.text_lbl.setOpenExternalLinks(True)
self.text_lbl.setObjectName("text_lbl")
self.verticalLayout_6.addWidget(self.text_lbl)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout_2.addWidget(self.scrollArea_2)
self.about_tabs.addTab(self.info_tab, "")
self.log_tab = QtGui.QWidget()
self.log_tab.setObjectName("log_tab")
self.verticalLayout_8 = QtGui.QVBoxLayout(self.log_tab)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.log_txt = QtGui.QPlainTextEdit(self.log_tab)
self.log_txt.setFrameShape(QtGui.QFrame.WinPanel)
self.log_txt.setDocumentTitle("")
self.log_txt.setUndoRedoEnabled(False)
self.log_txt.setReadOnly(True)
self.log_txt.setPlainText("")
self.log_txt.setObjectName("log_txt")
self.verticalLayout_8.addWidget(self.log_txt)
self.about_tabs.addTab(self.log_tab, "")
self.verticalLayout.addWidget(self.about_tabs)
self.btn_box = QtGui.QFrame(About)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_box.sizePolicy().hasHeightForWidth())
self.btn_box.setSizePolicy(sizePolicy)
self.btn_box.setObjectName("btn_box")
self.horizontalLayout = QtGui.QHBoxLayout(self.btn_box)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.about_qt_btn = QtGui.QPushButton(self.btn_box)
self.about_qt_btn.setObjectName("about_qt_btn")
self.horizontalLayout.addWidget(self.about_qt_btn)
spacerItem = QtGui.QSpacerItem(92, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.updates_btn = QtGui.QPushButton(self.btn_box)
self.updates_btn.setObjectName("updates_btn")
self.horizontalLayout.addWidget(self.updates_btn)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.close_btn = QtGui.QPushButton(self.btn_box)
self.close_btn.setObjectName("close_btn")
self.horizontalLayout.addWidget(self.close_btn)
self.verticalLayout.addWidget(self.btn_box)
self.retranslateUi(About)
self.about_tabs.setCurrentIndex(0)
QtCore.QObject.connect(self.close_btn, QtCore.SIGNAL("clicked()"), About.close)
QtCore.QMetaObject.connectSlotsByName(About)
def retranslateUi(self, About):
self.about_tabs.setTabText(self.about_tabs.indexOf(self.info_tab), QtGui.QApplication.translate("About", "Information", None, QtGui.QApplication.UnicodeUTF8))
self.about_tabs.setTabText(self.about_tabs.indexOf(self.log_tab), QtGui.QApplication.translate("About", "Log", None, QtGui.QApplication.UnicodeUTF8))
self.about_qt_btn.setText(QtGui.QApplication.translate("About", "About Qt", None, QtGui.QApplication.UnicodeUTF8))
self.updates_btn.setToolTip(QtGui.QApplication.translate("About", "Check online for an updated version", None, QtGui.QApplication.UnicodeUTF8))
self.updates_btn.setText(QtGui.QApplication.translate("About", "Check for Updates", None, QtGui.QApplication.UnicodeUTF8))
self.close_btn.setText(QtGui.QApplication.translate("About", "Close", None, QtGui.QApplication.UnicodeUTF8))
import images_rc
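For context, this is pyuic-style generated UI code: Ui_About only describes the layout, so a host dialog is still needed. A minimal launcher sketch, assuming PySide is installed and the file above is importable as gui_about (the module name comes from the metadata path; everything else is illustrative):
import sys
from PySide import QtGui
from gui_about import Ui_About  # assumes the file above is saved as gui_about.py

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_About()
    ui.setupUi(dialog)   # builds the tabs, label, log view and buttons onto the dialog
    dialog.show()
    sys.exit(app.exec_())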
| {
"content_hash": "681aceccd487fc254ef14ef209ac646e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 166,
"avg_line_length": 59.333333333333336,
"alnum_prop": 0.713664371148967,
"repo_name": "noembryo/KoHighlights",
"id": "aa167941b4cff5bd1b92630b58755326d370b949",
"size": "5794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui_about.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1003940"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import config
__version__ = config.VERSION
# Module API
from .cli import cli
from .stream import Stream
from .loader import Loader
from .parser import Parser
from .writer import Writer
from .validate import validate
from .exceptions import TabulatorException
from .exceptions import SourceError
from .exceptions import SchemeError
from .exceptions import FormatError
from .exceptions import EncodingError
from .exceptions import CompressionError
# Deprecated
from . import exceptions
from .exceptions import IOError
from .exceptions import LoadingError
from .exceptions import HTTPError
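The Stream class re-exported above is the package's main entry point. A minimal usage sketch, assuming a local data.csv with a header row (the file name and options are illustrative, not taken from this module):
from tabulator import Stream

# Open a CSV, treat the first row as headers, and iterate over the remaining rows.
with Stream('data.csv', headers=1) as stream:
    print(stream.headers)
    for row in stream.iter():
        print(row)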
| {
"content_hash": "41a5673e11f6a59ff6f75eab93bae19a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 42,
"avg_line_length": 25.79310344827586,
"alnum_prop": 0.8114973262032086,
"repo_name": "okfn/tabulator-py",
"id": "b3eb112917ac0b95570f450ffaae559a5a2281ab",
"size": "772",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tabulator/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "445"
},
{
"name": "Python",
"bytes": "59663"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import admin
#from django.utils.safestring import mark_safe
#from django.utils.translation import ugettext_lazy as _
from cablegate.cable.models import Cable, CableMetadata
class CableMetadataInline(admin.StackedInline):
model = CableMetadata
class CableAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'refid', 'classification', 'origin')
list_filter = ('classification', 'origin')
date_hierarchy = 'date'
search_fields = ('content',)
inlines = [CableMetadataInline]
admin.site.register(Cable, CableAdmin)
class CableMetadataAdmin(admin.ModelAdmin):
pass
admin.site.register(CableMetadata, CableMetadataAdmin)
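The admin options above only make sense against a model exposing the referenced fields; a hypothetical reconstruction of the relevant part of cablegate.cable.models (field names taken from list_display/search_fields, field types assumed):
from django.db import models

class Cable(models.Model):
    # field names mirror the admin configuration above; the types are assumptions
    refid = models.CharField(max_length=50)
    date = models.DateTimeField()
    classification = models.CharField(max_length=100)
    origin = models.CharField(max_length=100)
    content = models.TextField()

class CableMetadata(models.Model):
    cable = models.ForeignKey(Cable)   # required for the StackedInline above
    # remaining metadata fields omitted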
| {
"content_hash": "7be08bd5dc2e2ae6b89d6e77bb59e45b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.7599431818181818,
"repo_name": "h3/django-cablegate",
"id": "774202cd06d5d828ea0e735eb39db7ee16fc6bfd",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cablegate/cable/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "264754"
},
{
"name": "Python",
"bytes": "13355"
}
],
"symlink_target": ""
} |
import time
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from list import ListView
from detail import DetailView
class DateView(ListView):
"""
Abstract base class for date-based views.
"""
def __init__(self, **kwargs):
self._load_config_values(kwargs,
allow_future = False,
date_field = None,
)
super(DateView, self).__init__(**kwargs)
# Never use legacy pagination context since previous date-based
# views weren't paginated.
self.legacy_context = False
def get(self, *args, **kwargs):
obj = self.get_object(*args, **kwargs)
date_list, items, extra_context = self.get_dated_items(*args, **kwargs)
template = self.get_template(items)
context = self.get_context(items, date_list, extra_context)
mimetype = self.get_mimetype(items)
response = self.get_response(items, template, context, mimetype=mimetype)
return response
def get_queryset(self):
"""
Get the queryset to look objects up against. May not be called if
`get_dated_items` is overridden.
"""
if self.queryset is None:
raise ImproperlyConfigured("%(cls)s is missing a queryset. Define "\
"%(cls)s.queryset, or override "\
"%(cls)s.get_dated_items()." \
% {'cls': self.__class__.__name__})
return self.queryset._clone()
def get_dated_queryset(self, allow_future=False, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = allow_future or self.get_allow_future()
allow_empty = self.get_allow_empty()
if not allow_future:
qs = qs.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
if not allow_empty and not qs:
raise Http404("No %s available" % qs.model._meta.verbose_name_plural)
return qs
def get_date_list(self, queryset, date_type):
"""
Get a date list by calling `queryset.dates()`, checking along the way
for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
date_list = queryset.dates(date_field, date_type)[::-1]
if date_list is not None and not date_list and not allow_empty:
raise Http404("No %s available" % queryset.model._meta.verbose_name_plural)
return date_list
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
def get_context(self, items, date_list, context=None):
"""
Get the context. Must return a Context (or subclass) instance.
"""
if not context:
context = {}
context['date_list'] = date_list
return super(DateView, self).get_context(
items, paginator=None, page=None, context=context,
)
def get_template_names(self, items):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
return super(DateView, self).get_template_names(items, suffix=self._template_name_suffix)
def get_dated_items(self, *args, **kwargs):
"""
Return (date_list, items, extra_context) for this request.
"""
raise NotImplementedError()
class ArchiveView(DateView):
"""
Top-level archive of date-based items.
"""
_template_name_suffix = 'archive'
def __init__(self, **kwargs):
self._load_config_values(kwargs, num_latest=15)
super(ArchiveView, self).__init__(**kwargs)
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, 'year')
num_latest = self.get_num_latest()
if date_list and num_latest:
latest = qs.order_by('-'+self.get_date_field())[:num_latest]
else:
latest = None
return (date_list, latest, {})
def get_num_latest(self):
"""
Get the number of latest items to show on the archive page.
"""
return self.num_latest
def get_template_object_name(self, items):
"""
Get the name of the item to be used in the context.
"""
return self.template_object_name or 'latest'
class YearView(DateView):
"""
List of objects published in a given year.
"""
_template_name_suffix = 'archive_year'
def __init__(self, **kwargs):
# Override the allow_empty default from ListView
allow_empty = kwargs.pop('allow_empty', getattr(self, 'allow_empty', False))
self._load_config_values(kwargs, make_object_list=False)
super(YearView, self).__init__(allow_empty=allow_empty, **kwargs)
def get_dated_items(self, year):
"""
Return (date_list, items, extra_context) for this request.
"""
# Yes, no error checking: the URLpattern ought to validate this; it's
# an error if it doesn't.
year = int(year)
date_field = self.get_date_field()
qs = self.get_dated_queryset(**{date_field+'__year': year})
date_list = self.get_date_list(qs, 'month')
if self.get_make_object_list():
object_list = qs.order_by('-'+date_field)
else:
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
object_list = qs.none()
return (date_list, object_list, {'year': year})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class MonthView(DateView):
"""
List of objects published in a given month.
"""
_template_name_suffix = 'archive_month'
def __init__(self, **kwargs):
# Override the allow_empty default from ListView
allow_empty = kwargs.pop('allow_empty', getattr(self, 'allow_empty', False))
self._load_config_values(kwargs, month_format='%b')
super(MonthView, self).__init__(allow_empty=allow_empty, **kwargs)
def get_dated_items(self, year, month):
"""
Return (date_list, items, extra_context) for this request.
"""
date_field = self.get_date_field()
date = _date_from_string(year, '%Y', month, self.get_month_format())
# Construct a date-range lookup.
first_day, last_day = _month_bounds(date)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
allow_future = self.get_allow_future()
qs = self.get_dated_queryset(allow_future=allow_future, **lookup_kwargs)
date_list = self.get_date_list(qs, 'day')
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
def get_next_month(self, date):
"""
Get the next valid month.
"""
first_day, last_day = _month_bounds(date)
next = (last_day + datetime.timedelta(days=1)).replace(day=1)
return _get_next_prev_month(self, next, is_previous=False, use_first_day=True)
def get_previous_month(self, date):
"""
Get the previous valid month.
"""
first_day, last_day = _month_bounds(date)
prev = (first_day - datetime.timedelta(days=1)).replace(day=1)
return _get_next_prev_month(self, prev, is_previous=True, use_first_day=True)
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
class WeekView(DateView):
"""
List of objects published in a given week.
"""
_template_name_suffix = 'archive_year'
def __init__(self, **kwargs):
# Override the allow_empty default from ListView
allow_empty = kwargs.pop('allow_empty', getattr(self, 'allow_empty', False))
super(WeekView, self).__init__(allow_empty=allow_empty, **kwargs)
def get_dated_items(self, year, week):
"""
Return (date_list, items, extra_context) for this request.
"""
date_field = self.get_date_field()
date = _date_from_string(year, '%Y', '0', '%w', week, '%U')
# Construct a date-range lookup.
first_day = date
last_day = date + datetime.timedelta(days=7)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
allow_future = self.get_allow_future()
qs = self.get_dated_queryset(allow_future=allow_future, **lookup_kwargs)
return (None, qs, {'week': date})
class DayView(DateView):
"""
List of objects published on a given day.
"""
_template_name_suffix = "archive_day"
def __init__(self, **kwargs):
# Override the allow_empty default from ListView
allow_empty = kwargs.pop('allow_empty', getattr(self, 'allow_empty', False))
self._load_config_values(kwargs, month_format='%b', day_format='%d')
super(DayView, self).__init__(allow_empty=allow_empty, **kwargs)
def get_dated_items(self, year, month, day, date=None):
"""
Return (date_list, items, extra_context) for this request.
"""
date = _date_from_string(year, '%Y',
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayView can be trivial.
"""
date_field = self.get_date_field()
allow_future = self.get_allow_future()
field = self.get_queryset().model._meta.get_field(date_field)
lookup_kwargs = _date_lookup_for_field(field, date)
qs = self.get_dated_queryset(allow_future=allow_future, **lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date)
})
def get_next_day(self, date):
"""
Get the next valid day.
"""
next = date + datetime.timedelta(days=1)
return _get_next_prev_month(self, next, is_previous=False, use_first_day=False)
def get_previous_day(self, date):
"""
Get the previous valid day.
"""
prev = date - datetime.timedelta(days=1)
return _get_next_prev_month(self, prev, is_previous=True, use_first_day=False)
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the
day from url variables.
"""
return self.day_format
class TodayView(DayView):
"""
List of objects published today.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
return self._get_dated_items(datetime.date.today())
class DateDetailView(DetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def __init__(self, **kwargs):
self._load_config_values(kwargs,
date_field = None,
month_format = '%b',
day_format = '%d',
allow_future = False,
)
super(DateDetailView, self).__init__(**kwargs)
def get_object(self, year, month, day, pk=None, slug=None, object_id=None):
"""
Get the object this request displays.
"""
date = _date_from_string(year, '%Y',
month, self.get_month_format(),
day, self.get_day_format())
qs = self.get_queryset()
if not self.get_allow_future() and date > datetime.date.today():
raise Http404("Future %s not available because %s.allow_future is False." \
% (qs.model._meta.verbose_name_plural, self.__class__.__name__))
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
date_field = self.get_date_field()
field = qs.model._meta.get_field(date_field)
lookup = _date_lookup_for_field(field, date)
qs = qs.filter(**lookup)
return super(DateDetailView, self).get_object(queryset=qs,
pk=pk, slug=slug, object_id=object_id)
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the
day from url variables.
"""
return self.day_format
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
def _date_from_string(year, year_format, month, month_format, day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and possibly day; raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.date(*time.strptime(datestr, format)[:3])
except ValueError:
raise Http404("Invalid date string '%s' given format '%s'" % (datestr, format))
def _month_bounds(date):
"""
Helper: return the first and last days of the month for the given date.
"""
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
return first_day, last_day
def _get_next_prev_month(generic_view, naive_result, is_previous, use_first_day):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles both next and previous months
and days (for MonthView and DayView); hence the coupling to generic_view.
However, in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day or month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive month
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
# If allow_empty is True the naive value will be valid
if allow_empty:
result = naive_result
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lte' % date_field: naive_result}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: naive_result}
ordering = date_field
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
result = None
# Convert datetimes to dates
if hasattr(result, 'date'):
result = result.date()
# For month views, we always want to have a date that's the first of the
# month for consistency's sake.
if result and use_first_day:
result = result.replace(day=1)
# Check against future dates.
if result and (allow_future or result < datetime.date.today()):
return result
else:
return None
def _date_lookup_for_field(field, date):
"""
Get the lookup kwargs for looking up a date against a given Field. If the
date field is a DateTimeField, we can't just do filter(df=date) because
that doesn't take the time into account. So we need to make a range lookup
in those cases.
"""
if isinstance(field, models.DateTimeField):
date_range = (
datetime.datetime.combine(date, datetime.time.min),
datetime.datetime.combine(date, datetime.time.max)
)
return {'%s__range' % field.name: date_range}
else:
return {field.name: date}
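To make the two date helpers above concrete, here is a small self-contained sketch of what they compute (plain Python, no Django required; the field name and date values are illustrative):
import time
import datetime

# _date_from_string joins the URL parts and format strings with '__' and parses
# them in a single strptime call:
datestr = '__'.join(('2010', 'mar', '15'))              # '2010__mar__15'
fmt = '__'.join(('%Y', '%b', '%d'))                     # '%Y__%b__%d'
print(datetime.date(*time.strptime(datestr, fmt)[:3]))  # 2010-03-15

# _date_lookup_for_field turns a date into a range lookup for DateTimeFields so
# the time component does not exclude rows:
date = datetime.date(2010, 3, 15)
date_range = (datetime.datetime.combine(date, datetime.time.min),
              datetime.datetime.combine(date, datetime.time.max))
print({'pub_date__range': date_range})                  # 'pub_date' is a hypothetical field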
| {
"content_hash": "bd65e225ce7200197b0c1ec8ce64b3ce",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 97,
"avg_line_length": 35.010989010989015,
"alnum_prop": 0.5929587779870266,
"repo_name": "livni/old-OK",
"id": "2ab00ec53de25aa82a1368792b3bd822e47ae4ae",
"size": "19116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/knesset/hashnav/dates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31938"
},
{
"name": "JavaScript",
"bytes": "84209"
},
{
"name": "Python",
"bytes": "1179397"
}
],
"symlink_target": ""
} |
"""Gradient checker for any ops, graphs.
The gradient checker verifies numerically that an op/graph properly
computes the gradients
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gradients
from tensorflow.python.platform import logging
def _Product(t):
if isinstance(t, int):
return t
else:
y = 1
for x in t:
y *= x
return y
def _ComputeTheoricalJacobian(x, x_shape, x_data, dy, dy_shape, dx):
"""Computes the theoretical Jacobian for dy/dx.
Computes the theoretical Jacobian using the ops generated by
ComputeGradient().
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
dy: the tensor "dy".
dy_shape: the dimensions of dy as a tuple or an array of ints.
dx: Tensor or IndexedSlices representing dx
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "dy_size" columns where "x_size" is the number of elements in x and
"dy_size" is the number of elements in dy.
"""
# To compute the Jacobian, we treat x and y as one-dimensional vectors
x_size = _Product(x_shape)
x_val_size = _Product(x_shape[1:]) # This is used for sparse gradients
dy_size = _Product(dy_shape)
jacobian = np.zeros((x_size, dy_size), dtype=x_data.dtype)
# For each entry of dy, we set it to 1 and everything else to 0, and compute
# the backprop -- this gives us one column of the Jacobian matrix.
for col in range(0, dy_size):
dy_data = np.zeros(dy_shape, dtype=x_data.dtype)
dy_data.flat[col] = 1
sess = ops.get_default_session()
if isinstance(dx, ops.IndexedSlices):
backprop_indices, backprop_values = sess.run(
[dx.indices, dx.values], feed_dict={x: x_data, dy: dy_data})
for i, v in zip(backprop_indices, backprop_values):
r_begin = i * x_val_size
r_end = r_begin + x_val_size
jacobian[r_begin:r_end, col] += v.flat
else:
assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
backprop = sess.run(dx, feed_dict={x: x_data, dy: dy_data})
jacobian[:, col] = backprop.reshape(x_size)
logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
return jacobian
def _ComputeNumericJacobian(x, x_shape, x_data, y, y_shape, delta):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
y: the tensor "y".
y_shape: the dimensions of y as a tuple or an array of ints.
delta: the amount of perturbation we give to the input
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in x and
"y_size" is the number of elements in y.
"""
# To compute the Jacobian, we treat x and y as one-dimensional vectors
x_size = _Product(x_shape)
y_size = _Product(y_shape)
jacobian = np.zeros((x_size, y_size), dtype=x_data.dtype)
# For each entry of x, we slightly perturb it by adding and subtracting a
# delta, then compute the difference between the outputs. This gives us one
# row of the Jacobian matrix.
for row in range(0, x_size):
x_pos = x_data.copy()
x_pos.flat[row] += delta
y_pos = y.eval(feed_dict={x: x_pos})
x_neg = x_data.copy()
x_neg.flat[row] -= delta
y_neg = y.eval(feed_dict={x: x_neg})
diff = (y_pos - y_neg) / (2 * delta)
jacobian[row, :] = diff.reshape(y_size)
logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
return jacobian
def _ComputeDxAndDy(x, y, y_shape):
"""Returns a node to compute gradient of x wrt y."""
# We make up a dy so that we can compute the gradients. We don't really use
# the value of dy -- we will always feed it. We need to add an identity node
# so that we can always feed it properly. Otherwise, for the Add operation,
# dx is the same as dy and we cannot fetch the tensor that we are feeding.
with x.graph.as_default():
dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
dy = array_ops.identity(dy_orig)
# We compute the gradients for x wrt. y
grads = gradients.gradients(y, x, dy)
assert len(grads) == 1
return grads[0], dy_orig
def _ComputeGradient(x, x_shape, dx, y, y_shape, dy,
x_init_value=None, delta=1e-3):
"""Computes the theoretical and numerical jacobian."""
t = types.as_dtype(x.dtype)
allowed_types = [types.float32, types.float64]
assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
t2 = types.as_dtype(y.dtype)
assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
if x_init_value is not None:
i_shape = list(x_init_value.shape)
assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
x_shape, i_shape)
x_data = x_init_value
else:
if t == types.float32:
dtype = np.float32
else:
dtype = np.float64
x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)
jacob_t = _ComputeTheoricalJacobian(x, x_shape, x_data, dy, y_shape, dx)
jacob_n = _ComputeNumericJacobian(x, x_shape, x_data, y, y_shape, delta)
return jacob_t, jacob_n
def _ComputeGradientList(
x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
"""Compute gradients for a list of x values."""
assert isinstance(x, list)
dx, dy = zip(*[_ComputeDxAndDy(xi, y, y_shape) for xi in x])
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
if x_init_value is None:
x_init_value = [None] * len(x)
ret = [_ComputeGradient(xi, x_shapei, dxi, y, y_shape, dyi,
x_init_valuei, delta)
for xi, x_shapei, dxi, dyi, x_init_valuei in
zip(x, x_shape, dx, dy, x_init_value)]
return ret
def ComputeGradient(
x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
"""Computes and returns the theoretical and numerical Jacobian.
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): remove this argument.
Returns:
Two 2-d numpy arrays representing the theoretical and numerical
Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and "y_size" is the
number of elements in y. If x is a list, returns a list of two numpy arrays.
"""
if isinstance(x, list):
return _ComputeGradientList(x, x_shape, y, y_shape, x_init_value,
delta, init_targets)
else:
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
dx, dy = _ComputeDxAndDy(x, y, y_shape)
ret = _ComputeGradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta)
return ret
def ComputeGradientError(
x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
"""Computes the gradient error.
Computes the maximum error for dy/dx between the computed Jacobian and the
numerically estimated Jacobian.
This function will modify the tensors passed in as it adds more operations
and hence changes the consumers of the operations of the input tensors.
This function adds operations to the current session. To compute the error
using a particular device, such as a GPU, use the standard methods for
setting a device (e.g. using with sess.graph.device() or setting a device
function in the session constructor).
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): Remove this argument.
Returns:
The maximum error between the two Jacobians.
"""
grad = ComputeGradient(x, x_shape, y, y_shape, x_init_value,
delta, init_targets)
if isinstance(grad, tuple):
grad = [grad]
return max(np.fabs(j_t - j_n).max() for j_t, j_n in grad)
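The numeric Jacobian above is an ordinary central-difference estimate; the same idea in a self-contained NumPy sketch (toy function and shapes chosen for illustration, not taken from this module):
import numpy as np

def numeric_jacobian(f, x, delta=1e-3):
    """Estimate J[i, j] = d f(x)[j] / d x[i] with central differences."""
    x = np.asarray(x, dtype=np.float64)
    y_size = np.asarray(f(x)).size
    jac = np.zeros((x.size, y_size))
    for row in range(x.size):
        x_pos = x.copy(); x_pos.flat[row] += delta
        x_neg = x.copy(); x_neg.flat[row] -= delta
        diff = (np.asarray(f(x_pos)) - np.asarray(f(x_neg))) / (2 * delta)
        jac[row, :] = diff.reshape(y_size)
    return jac

# f(x) = x**2 elementwise, so the exact Jacobian is diag(2x).
print(numeric_jacobian(lambda v: v ** 2, np.array([1.0, 2.0, 3.0])))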
| {
"content_hash": "7ab5d609c80db12206b9d37213400093",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 80,
"avg_line_length": 37.733333333333334,
"alnum_prop": 0.6727291623363126,
"repo_name": "MemeticParadigm/TensorFlow",
"id": "1adf22d512588d56787550cc607014e99c766ce0",
"size": "9622",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/gradient_checker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127104"
},
{
"name": "C++",
"bytes": "4901913"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637241"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2473570"
},
{
"name": "Shell",
"bytes": "1714"
},
{
"name": "TypeScript",
"bytes": "237446"
}
],
"symlink_target": ""
} |
"""
Created on Thu Dec 29 20:19:25 2016
@author: simon
"""# This is the implement of Fig 4
"""Anchor vectors in LeNet-5 for the MNIST dataset change during training:"""
###########
# Note: this part draws parameters taken from LeNet-5.
import numpy as np
from sklearn import preprocessing
from matplotlib import pyplot as plt
# set up subplots
fig, ax = plt.subplots(nrows=1,ncols=2, sharey=True)
# process and draw change of anchor vectors during training.
def draw_conv(dx,pl,name,y=1):
size= dx[0].shape
dxanchor = np.zeros(size[0]*size[1]*size[2])
# flatten each weight tensor and take its first filter as the anchor vector
for i in range(len(dx)):
dxanchor = np.vstack((dxanchor, dx[i].reshape((size[0]*size[1]*size[2],size[3])).T[0]))
print(dxanchor.shape)
# normalize the anchor vectors so each has unit l2-norm
dxanchor = preprocessing.normalize(dxanchor, norm='l2')
# change: cos(\theta), the inner product of consecutive normalized vectors
change = []
for j in range(dxanchor.shape[0]):
if j >0:
change += [np.inner(dxanchor[j],dxanchor[j-1])]
#plot change
pl.plot(range(len(change)), np.array(change), linewidth=3, alpha=0.5)
#plot y=1
pl.plot(range(len(change)), np.ones(len(change)), linewidth=1.5, color = 'g')
# set x-axis and y-axis, title
pl.set_ylim(0.999999, 1.0000001)
#pl.set_xlim(1, 300)
pl.set_title(name)
if y==1:
pl.set_ylabel('Change of Anchor Vector')
pl.set_xlabel('Iterations')
pl.yaxis.get_major_formatter().set_powerlimits((0,1))
return change
# never used; not sure whether this part of the code works
def draw_fc(dx,pl,name):
size= dx[0].shape
dxanchor = np.zeros(size[0])
for i in range(len(dx)):
# fc weights are 2-D (in, out); use the incoming weights of the first unit as the anchor vector
dxanchor = np.vstack((dxanchor, dx[i].T[0]))
print(dxanchor.shape)
dxanchor = preprocessing.normalize(dxanchor, norm='l2')
change = []
for j in range(dxanchor.shape[0]):
if j >0:
change += [np.inner(dxanchor[j],dxanchor[j-1])]
pl.plot(range(len(change)), np.array(change))
pl.set_ylim(0.999999, 1)
#pl.xlim(1, 300)
pl.set_title(name)
change1 = draw_conv(w1_list, ax[0], 'Conv-1')
change2 = draw_conv(w2_list, ax[1], 'Conv-2',0)
#draw_fc(w3_list,ax[2],'Fc-3')
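w1_list and w2_list are assumed to be lists of convolution-weight snapshots collected during LeNet-5 training; they are not defined in this file. The "change" measure itself can be exercised stand-alone (random arrays stand in for real snapshots, and the 5x5x1x6 shape is just an example):
import numpy as np
from sklearn import preprocessing

snapshots = [np.random.randn(5, 5, 1, 6) for _ in range(10)]  # fake training snapshots
anchors = np.vstack([w.reshape(5 * 5 * 1, 6).T[0] for w in snapshots])
anchors = preprocessing.normalize(anchors, norm='l2')
# cosine similarity between consecutive anchor vectors; values near 1 mean little change
change = [np.inner(anchors[j], anchors[j - 1]) for j in range(1, anchors.shape[0])]
print(change)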
| {
"content_hash": "0999718bde5d09a56921a6c7b9fecaa9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 95,
"avg_line_length": 30.773333333333333,
"alnum_prop": 0.6304159445407279,
"repo_name": "XinDongol/RECOS",
"id": "4b256f5c7178d64ffbcb8f55fbb6632977f78441",
"size": "2355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exp2/draw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "1695"
},
{
"name": "Python",
"bytes": "46293"
}
],
"symlink_target": ""
} |
import socket
UDP_IP = "10.27.15.13" #ip of phone, use ifconfog in terminal
UDP_PORT = 1337
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
def move(x,y):
data = str(x)+","+str(y)
try:
sock.sendto(data, (UDP_IP, UDP_PORT))
except:
pass
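A quick way to exercise move() is to run a matching receiver on the phone (or on the same machine with UDP_IP = "127.0.0.1"). A minimal sketch; the 1024-byte buffer is an arbitrary choice, only the port must match:
import socket

recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_sock.bind(("0.0.0.0", 1337))        # same port as UDP_PORT above
while True:
    data, addr = recv_sock.recvfrom(1024)
    x, y = data.decode().split(",")      # reverse of the "x,y" payload built by move()
    print(x, y, addr)
Calling move(100, 200) on the client side should then show the pair arriving at the receiver.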
| {
"content_hash": "b4766ffb997a4984e6f9169fbb15e97d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 22.352941176470587,
"alnum_prop": 0.5947368421052631,
"repo_name": "helloworldC2/VirtualRobot",
"id": "b84a1c0ed6c8c3c5fe7156d92a78635d01ff4d98",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VRClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1103753"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Page'
db.create_table(u'main_page', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'], null=True, blank=True)),
('creator_session_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
('published_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('text_writability', self.gf('django.db.models.fields.IntegerField')(default=3)),
('image_writability', self.gf('django.db.models.fields.IntegerField')(default=3)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('short_url', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('bg_color', self.gf('django.db.models.fields.CharField')(default='#fafafa', max_length=32, blank=True)),
('bg_texture', self.gf('django.db.models.fields.CharField')(default='light_wool_midalpha.png', max_length=1024, blank=True)),
('bg_fn', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('default_textitem_color', self.gf('django.db.models.fields.CharField')(default='#000', max_length=32, blank=True)),
('default_textitem_bg_color', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('default_textitem_font_size', self.gf('django.db.models.fields.PositiveIntegerField')(default=13, null=True, blank=True)),
('default_textitem_font', self.gf('django.db.models.fields.CharField')(default='Arial', max_length=32, blank=True)),
('default_textitem_bg_texture', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('use_custom_admin_style', self.gf('django.db.models.fields.BooleanField')(default=False)),
('admin_textitem_color', self.gf('django.db.models.fields.CharField')(default='#000', max_length=32, blank=True)),
('admin_textitem_bg_color', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('admin_textitem_font_size', self.gf('django.db.models.fields.PositiveIntegerField')(default=13, null=True, blank=True)),
('admin_textitem_bg_texture', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('admin_textitem_font', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
))
db.send_create_signal(u'main', ['Page'])
# Adding model 'TextItem'
db.create_table(u'main_textitem', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Page'])),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'], null=True)),
('creator_window_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_session_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('x', self.gf('django.db.models.fields.IntegerField')()),
('y', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('border_color', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('border_width', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('border_radius', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('editable', self.gf('django.db.models.fields.BooleanField')(default=False)),
('link_to_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('color', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('bg_color', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('bg_texture', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('font_size', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('font', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
))
db.send_create_signal(u'main', ['TextItem'])
# Adding model 'ImageItem'
db.create_table(u'main_imageitem', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Page'])),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'], null=True)),
('creator_window_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_session_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('x', self.gf('django.db.models.fields.IntegerField')()),
('y', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('border_color', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('border_width', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('border_radius', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('src', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('link_to_url', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'main', ['ImageItem'])
# Adding model 'EmbedItem'
db.create_table(u'main_embeditem', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Page'])),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'], null=True)),
('creator_window_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_session_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('creator_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('x', self.gf('django.db.models.fields.IntegerField')()),
('y', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('border_color', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('border_width', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('border_radius', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('original_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('embedly_data', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'main', ['EmbedItem'])
# Adding model 'Membership'
db.create_table(u'main_membership', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Page'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'])),
))
db.send_create_signal(u'main', ['Membership'])
# Adding unique constraint on 'Membership', fields ['page', 'user']
db.create_unique(u'main_membership', ['page_id', 'user_id'])
# Adding model 'PageView'
db.create_table(u'main_pageview', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CustomUser'], null=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Page'])),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('sessionid', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
))
db.send_create_signal(u'main', ['PageView'])
# Adding model 'CustomUser'
db.create_table('auth_user', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'main', ['CustomUser'])
# Adding M2M table for field groups on 'CustomUser'
db.create_table('auth_user_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('customuser', models.ForeignKey(orm[u'main.customuser'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique('auth_user_groups', ['customuser_id', 'group_id'])
# Adding M2M table for field user_permissions on 'CustomUser'
db.create_table('auth_user_user_permissions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('customuser', models.ForeignKey(orm[u'main.customuser'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique('auth_user_user_permissions', ['customuser_id', 'permission_id'])
# Adding model 'Follow'
db.create_table(u'main_follow', (
('id', self.gf('uuidfield.fields.UUIDField')(unique=True, max_length=32, primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='friends', to=orm['main.CustomUser'])),
('target', self.gf('django.db.models.fields.related.ForeignKey')(related_name='followers', to=orm['main.CustomUser'])),
))
db.send_create_signal(u'main', ['Follow'])
# Adding unique constraint on 'Follow', fields ['user', 'target']
db.create_unique(u'main_follow', ['user_id', 'target_id'])
def backwards(self, orm):
# Removing unique constraint on 'Follow', fields ['user', 'target']
db.delete_unique(u'main_follow', ['user_id', 'target_id'])
# Removing unique constraint on 'Membership', fields ['page', 'user']
db.delete_unique(u'main_membership', ['page_id', 'user_id'])
# Deleting model 'Page'
db.delete_table(u'main_page')
# Deleting model 'TextItem'
db.delete_table(u'main_textitem')
# Deleting model 'ImageItem'
db.delete_table(u'main_imageitem')
# Deleting model 'EmbedItem'
db.delete_table(u'main_embeditem')
# Deleting model 'Membership'
db.delete_table(u'main_membership')
# Deleting model 'PageView'
db.delete_table(u'main_pageview')
# Deleting model 'CustomUser'
db.delete_table('auth_user')
# Removing M2M table for field groups on 'CustomUser'
db.delete_table('auth_user_groups')
# Removing M2M table for field user_permissions on 'CustomUser'
db.delete_table('auth_user_user_permissions')
# Deleting model 'Follow'
db.delete_table(u'main_follow')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.customuser': {
'Meta': {'object_name': 'CustomUser', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'main.embeditem': {
'Meta': {'object_name': 'EmbedItem'},
'border_color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'border_radius': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'border_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']", 'null': 'True'}),
'creator_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'creator_session_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'creator_window_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'embedly_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'original_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Page']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
u'main.follow': {
'Meta': {'unique_together': "[['user', 'target']]", 'object_name': 'Follow'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': u"orm['main.CustomUser']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'friends'", 'to': u"orm['main.CustomUser']"})
},
u'main.imageitem': {
'Meta': {'object_name': 'ImageItem'},
'border_color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'border_radius': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'border_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']", 'null': 'True'}),
'creator_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'creator_session_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'creator_window_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'link_to_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Page']"}),
'src': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
u'main.membership': {
'Meta': {'unique_together': "[['page', 'user']]", 'object_name': 'Membership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Page']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']"})
},
u'main.page': {
'Meta': {'object_name': 'Page'},
'admin_textitem_bg_color': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'admin_textitem_bg_texture': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'admin_textitem_color': ('django.db.models.fields.CharField', [], {'default': "'#000'", 'max_length': '32', 'blank': 'True'}),
'admin_textitem_font': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'admin_textitem_font_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '13', 'null': 'True', 'blank': 'True'}),
'bg_color': ('django.db.models.fields.CharField', [], {'default': "'#fafafa'", 'max_length': '32', 'blank': 'True'}),
'bg_fn': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bg_texture': ('django.db.models.fields.CharField', [], {'default': "'light_wool_midalpha.png'", 'max_length': '1024', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'creator_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'creator_session_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'default_textitem_bg_color': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'default_textitem_bg_texture': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'default_textitem_color': ('django.db.models.fields.CharField', [], {'default': "'#000'", 'max_length': '32', 'blank': 'True'}),
'default_textitem_font': ('django.db.models.fields.CharField', [], {'default': "'Arial'", 'max_length': '32', 'blank': 'True'}),
'default_textitem_font_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '13', 'null': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'image_writability': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'short_url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'text_writability': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'use_custom_admin_style': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'main.pageview': {
'Meta': {'object_name': 'PageView'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Page']"}),
'sessionid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']", 'null': 'True'})
},
u'main.textitem': {
'Meta': {'object_name': 'TextItem'},
'bg_color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bg_texture': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'border_color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'border_radius': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'border_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.CustomUser']", 'null': 'True'}),
'creator_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'creator_session_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'creator_window_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'font': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'font_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'link_to_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Page']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['main'] | {
"content_hash": "f90f43c2fd9222a6662e36b7817cab43",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 187,
"avg_line_length": 81.09685863874346,
"alnum_prop": 0.5894961102682462,
"repo_name": "reverie/jotleaf.com",
"id": "e69052fbdff6b67c20671e53b3de4612b1dca6ae",
"size": "31003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jotleaf/main/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "260767"
},
{
"name": "CoffeeScript",
"bytes": "236120"
},
{
"name": "HTML",
"bytes": "133720"
},
{
"name": "JavaScript",
"bytes": "279484"
},
{
"name": "PHP",
"bytes": "865"
},
{
"name": "Python",
"bytes": "442635"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferImage
import GafferUI
import imath
Gaffer.Metadata.registerNode(
GafferImage.CopyViews,
"description",
"""
Copies views from the secondary input images onto the primary input image.
Only works with multi-view images.
""",
plugs = {
"views" : [
"description",
"""
The names of the views to copy. Names should be
separated by spaces and can use Gaffer's standard
wildcards.
""",
],
}
)
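# Illustrative sketch, not part of the original file: assuming the usual Gaffer
# plug API, the "views" plug documented above could be set with space-separated
# names or wildcards, e.g.
#
#     node = GafferImage.CopyViews()
#     node["views"].setValue( "left right" )  # or "*" to match every view
#
# The variable name and the example view names are placeholders for illustration.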
| {
"content_hash": "9193088fcebc82a2ac8677959f867cc4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 14.516129032258064,
"alnum_prop": 0.6866666666666666,
"repo_name": "andrewkaufman/gaffer",
"id": "743bc467391872743a4c120876c947b4c15d3009",
"size": "2254",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "python/GafferImageUI/CopyViewsUI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10279312"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
} |
'''
Interactive Brokers gateway integration, now based on the vn.ib wrapper.
Notes:
1. The IB API can only retrieve and manage orders placed after the current connection was established; orders placed before a program restart are not received.
2. The IB API likewise only pushes trades that occur after the current connection was established.
3. Position and account updates from the IB API can be subscribed to as push updates, so qryAccount and qryPosition are not needed.
4. Only stock and futures trading is supported for now; in the IB API, identifying an option contract relies on several fields of the Contract object, which is more involved and has not been implemented yet.
5. Trading rules in overseas markets differ from domestic ones in many details, so some field-type mappings may not be ideal; please report any problems you find.
'''
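# Illustrative note, not from the original source: connect() below reads a JSON
# config file named "IB_connect.json" located next to this module. Based only on
# the keys parsed there, a minimal config could look like this (all values are
# placeholders, not values from the source):
#
#     {
#         "host": "127.0.0.1",
#         "port": 7497,
#         "clientId": 1,
#         "accountCode": "DU000000"
#     }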
import os
import json
import calendar
from datetime import datetime, timedelta
from copy import copy
from vnib import *
from vtGateway import *
from language import text
# Mapping dictionaries between VT types and IB types
# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = 'LMT'
priceTypeMap[PRICETYPE_MARKETPRICE] = 'MKT'
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction mapping
directionMap = {}
directionMap[DIRECTION_LONG] = 'BUY'
#directionMap[DIRECTION_SHORT] = 'SSHORT'  # In IB, SSHORT means shorting a stock via securities lending (not the plain sell common domestically)
directionMap[DIRECTION_SHORT] = 'SELL'  # For consistency with the domestic gateways, IB's SELL is mapped to vt's SHORT
directionMapReverse = {v: k for k, v in directionMap.items()}
directionMapReverse['BOT'] = DIRECTION_LONG
directionMapReverse['SLD'] = DIRECTION_SHORT
# Exchange mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SMART] = 'SMART'
exchangeMap[EXCHANGE_NYMEX] = 'NYMEX'
exchangeMap[EXCHANGE_GLOBEX] = 'GLOBEX'
exchangeMap[EXCHANGE_IDEALPRO] = 'IDEALPRO'
exchangeMap[EXCHANGE_HKEX] = 'HKEX'
exchangeMap[EXCHANGE_HKFE] = 'HKFE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Order status mapping
orderStatusMap = {}
orderStatusMap[STATUS_NOTTRADED] = 'Submitted'
orderStatusMap[STATUS_ALLTRADED] = 'Filled'
orderStatusMap[STATUS_CANCELLED] = 'Cancelled'
orderStatusMapReverse = {v:k for k,v in orderStatusMap.items()}
orderStatusMapReverse['PendingSubmit'] = STATUS_UNKNOWN  # vt order status types could be extended here later if needed
orderStatusMapReverse['PendingCancel'] = STATUS_UNKNOWN
orderStatusMapReverse['PreSubmitted'] = STATUS_UNKNOWN
orderStatusMapReverse['Inactive'] = STATUS_UNKNOWN
# Product class mapping
productClassMap = {}
productClassMap[PRODUCT_EQUITY] = 'STK'
productClassMap[PRODUCT_FUTURES] = 'FUT'
productClassMap[PRODUCT_OPTION] = 'OPT'
productClassMap[PRODUCT_FOREX] = 'CASH'
productClassMap[PRODUCT_INDEX] = 'IND'
productClassMapReverse = {v:k for k,v in productClassMap.items()}
# Option type mapping
optionTypeMap = {}
optionTypeMap[OPTION_CALL] = 'CALL'
optionTypeMap[OPTION_PUT] = 'PUT'
optionTypeMapReverse = {v:k for k,v in optionTypeMap.items()}
# Currency mapping
currencyMap = {}
currencyMap[CURRENCY_USD] = 'USD'
currencyMap[CURRENCY_CNY] = 'CNY'
currencyMap[CURRENCY_HKD] = 'HKD'
currencyMapReverse = {v:k for k,v in currencyMap.items()}
# Mapping from tick field IDs to tick attribute names
tickFieldMap = {}
tickFieldMap[0] = 'bidVolume1'
tickFieldMap[1] = 'bidPrice1'
tickFieldMap[2] = 'askPrice1'
tickFieldMap[3] = 'askVolume1'
tickFieldMap[4] = 'lastPrice'
tickFieldMap[5] = 'lastVolume'
tickFieldMap[6] = 'highPrice'
tickFieldMap[7] = 'lowPrice'
tickFieldMap[8] = 'volume'
tickFieldMap[9] = 'preClosePrice'
tickFieldMap[14] = 'openPrice'
tickFieldMap[22] = 'openInterest'
# Mapping from account data keys to account attribute names
accountKeyMap = {}
accountKeyMap['NetLiquidationByCurrency'] = 'balance'
accountKeyMap['NetLiquidation'] = 'balance'
accountKeyMap['UnrealizedPnL'] = 'positionProfit'
accountKeyMap['AvailableFunds'] = 'available'
accountKeyMap['MaintMarginReq'] = 'margin'
########################################################################
class IbGateway(VtGateway):
"""IB接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='IB'):
"""Constructor"""
super(IbGateway, self).__init__(eventEngine, gatewayName)
        self.host = EMPTY_STRING                # connection host
        self.port = EMPTY_INT                   # connection port
        self.clientId = EMPTY_INT               # client id
        self.accountCode = EMPTY_STRING         # account code
        self.tickerId = 0                       # counter used to generate ticker ids when subscribing to market data
        self.tickDict = {}                      # tick snapshot dict, key is tickerId, value is a VtTickData object
        self.tickProductDict = {}               # product class per tick subscription, key is tickerId, value is the product class
        self.orderId = 0                        # order id counter
        self.orderDict = {}                     # order dict, key is orderId, value is a VtOrderData object
        self.accountDict = {}                   # account dict
        self.contractDict = {}                  # contract dict
        self.subscribeReqDict = {}              # cache of subscribe requests received before connecting
        self.connected = False                  # connection status
        self.api = IbWrapper(self)              # API wrapper instance
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
fileName = self.gatewayName + '_connect.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
try:
            f = open(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.LOADING_ERROR
self.onLog(log)
return
        # Parse the json config
setting = json.load(f)
try:
self.host = str(setting['host'])
self.port = int(setting['port'])
self.clientId = int(setting['clientId'])
self.accountCode = str(setting['accountCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.CONFIG_KEY_MISSING
self.onLog(log)
return
        # Initiate the connection
        self.api.eConnect(self.host, self.port, self.clientId, False)
        # Query the server time
self.api.reqCurrentTime()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
# 如果尚未连接行情,则将订阅请求缓存下来后直接返回
if not self.connected:
self.subscribeReqDict[subscribeReq.symbol] = subscribeReq
return
contract = Contract()
contract.localSymbol = str(subscribeReq.symbol)
contract.exchange = exchangeMap.get(subscribeReq.exchange, '')
contract.secType = productClassMap.get(subscribeReq.productClass, '')
contract.currency = currencyMap.get(subscribeReq.currency, '')
contract.expiry = subscribeReq.expiry
contract.strike = subscribeReq.strikePrice
contract.right = optionTypeMap.get(subscribeReq.optionType, '')
        # Request the full contract details
self.tickerId += 1
self.api.reqContractDetails(self.tickerId, contract)
        # Create the contract object and store it in the dict
ct = VtContractData()
ct.gatewayName = self.gatewayName
ct.symbol = str(subscribeReq.symbol)
ct.exchange = subscribeReq.exchange
ct.vtSymbol = '.'.join([ct.symbol, ct.exchange])
ct.productClass = subscribeReq.productClass
self.contractDict[ct.vtSymbol] = ct
        # Subscribe to market data
self.tickerId += 1
self.api.reqMktData(self.tickerId, contract, '', False, TagValueList())
        # Create the tick object and store it in the dict
tick = VtTickData()
tick.symbol = subscribeReq.symbol
tick.exchange = subscribeReq.exchange
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.gatewayName = self.gatewayName
self.tickDict[self.tickerId] = tick
self.tickProductDict[self.tickerId] = subscribeReq.productClass
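    # Illustrative sketch, not from the original source: a subscribe request using
    # only the fields referenced above might be filled in like this (symbol and
    # values are placeholders):
    #
    #     req = VtSubscribeReq()
    #     req.symbol = 'IBM'
    #     req.exchange = EXCHANGE_SMART
    #     req.productClass = PRODUCT_EQUITY
    #     req.currency = CURRENCY_USD
    #     gateway.subscribe(req)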
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
# 增加报单号1,最后再次进行查询
# 这里双重设计的目的是为了防止某些情况下,连续发单时,nextOrderId的回调推送速度慢导致没有更新
self.orderId += 1
# 创建合约对象
contract = Contract()
contract.localSymbol = str(orderReq.symbol)
contract.exchange = exchangeMap.get(orderReq.exchange, '')
contract.secType = productClassMap.get(orderReq.productClass, '')
contract.currency = currencyMap.get(orderReq.currency, '')
contract.expiry = orderReq.expiry
contract.strike = orderReq.strikePrice
contract.right = optionTypeMap.get(orderReq.optionType, '')
contract.lastTradeDateOrContractMonth = str(orderReq.lastTradeDateOrContractMonth)
contract.multiplier = str(orderReq.multiplier)
        # Create the order object
order = Order()
order.orderId = self.orderId
order.clientId = self.clientId
order.action = directionMap.get(orderReq.direction, '')
order.lmtPrice = orderReq.price
order.totalQuantity = orderReq.volume
order.orderType = priceTypeMap.get(orderReq.priceType, '')
        # Place the order
        self.api.placeOrder(self.orderId, contract, order)
        # Request the next valid order id
        self.api.reqIds(1)
        # Return the vt order id
vtOrderID = '.'.join([self.gatewayName, str(self.orderId)])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.cancelOrder(int(cancelOrderReq.orderID))
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.NONEED_TO_QRYACCOUNT
self.onLog(log)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.NONEED_TO_QRYPOSITION
self.onLog(log)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.eDisconnect()
########################################################################
class IbWrapper(IbApi):
"""IB回调接口的实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(IbWrapper, self).__init__()
        self.apiStatus = False                          # connection status
        self.gateway = gateway                          # gateway object
        self.gatewayName = gateway.gatewayName          # gateway object name
        self.tickDict = gateway.tickDict                # tick snapshot dict, key is tickerId, value is a VtTickData object
        self.orderDict = gateway.orderDict              # order dict
        self.accountDict = gateway.accountDict          # account dict
        self.contractDict = gateway.contractDict        # contract dict
self.tickProductDict = gateway.tickProductDict
self.subscribeReqDict = gateway.subscribeReqDict
#----------------------------------------------------------------------
def nextValidId(self, orderId):
""""""
self.gateway.orderId = orderId
#----------------------------------------------------------------------
def currentTime(self, time):
"""连接成功后推送当前时间"""
dt = datetime.fromtimestamp(time)
t = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
self.apiStatus = True
self.gateway.connected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.API_CONNECTED.format(time=t)
self.gateway.onLog(log)
        # Re-send any subscribe requests that were cached before the connection was ready
        for symbol, req in list(self.subscribeReqDict.items()):
            del self.subscribeReqDict[symbol]
            self.gateway.subscribe(req)
#----------------------------------------------------------------------
def connectAck(self):
""""""
pass
#----------------------------------------------------------------------
def error(self, id_, errorCode, errorString):
"""错误推送"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = errorCode
err.errorMsg = errorString.decode('GBK')
self.gateway.onError(err)
#----------------------------------------------------------------------
def accountSummary(self, reqId, account, tag, value, curency):
""""""
pass
#----------------------------------------------------------------------
def accountSummaryEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def tickPrice(self, tickerId, field, price, canAutoExecute):
"""行情价格相关推送"""
if field in tickFieldMap:
# 对于股票、期货等行情,有新价格推送时仅更新tick缓存
# 只有当发生成交后,tickString更新最新成交价时才推送新的tick
# 即bid/ask的价格变动并不会触发新的tick推送
tick = self.tickDict[tickerId]
key = tickFieldMap[field]
tick.__setattr__(key, price)
            # IB forex quotes carry no last price or timestamp; compute them locally and push the tick immediately
if self.tickProductDict[tickerId] == PRODUCT_FOREX:
tick.lastPrice = (tick.bidPrice1 + tick.askPrice1) / 2
dt = datetime.now()
tick.time = dt.strftime('%H:%M:%S.%f')
tick.date = dt.strftime('%Y%m%d')
                # Push the updated tick data
newtick = copy(tick)
self.gateway.onTick(newtick)
else:
print(field)
#----------------------------------------------------------------------
def tickSize(self, tickerId, field, size):
"""行情数量相关推送"""
if field in tickFieldMap:
tick = self.tickDict[tickerId]
key = tickFieldMap[field]
tick.__setattr__(key, size)
else:
print(field)
#----------------------------------------------------------------------
def tickOptionComputation(self, tickerId, tickType, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
""""""
pass
#----------------------------------------------------------------------
def tickGeneric(self, tickerId, tickType, value):
""""""
pass
#----------------------------------------------------------------------
def tickString(self, tickerId, tickType, value):
"""行情补充信息相关推送"""
# 如果是最新成交时间戳更新
if tickType == '45':
tick = self.tickDict[tickerId]
dt = datetime.fromtimestamp(value)
tick.time = dt.strftime('%H:%M:%S.%f')
tick.date = dt.strftime('%Y%m%d')
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureLastTradeDate, dividendImpact, dividendsToLastTradeDate):
""""""
pass
#----------------------------------------------------------------------
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
"""委托状态更新"""
orderId = str(orderId)
if orderId in self.orderDict:
od = self.orderDict[orderId]
else:
            od = VtOrderData()  # od stands for orderData
od.orderID = orderId
od.vtOrderID = '.'.join([self.gatewayName, orderId])
od.gatewayName = self.gatewayName
self.orderDict[orderId] = od
od.status = orderStatusMapReverse.get(status, STATUS_UNKNOWN)
od.tradedVolume = filled
newod = copy(od)
self.gateway.onOrder(newod)
#----------------------------------------------------------------------
def openOrder(self, orderId, contract, order, orderState):
"""下达委托推送"""
orderId = str(orderId) # orderId是整数
if orderId in self.orderDict:
od = self.orderDict[orderId]
else:
            od = VtOrderData()  # od stands for orderData
od.orderID = orderId
od.vtOrderID = '.'.join([self.gatewayName, orderId])
od.symbol = contract.localSymbol
od.exchange = exchangeMapReverse.get(contract.exchange, '')
od.vtSymbol = '.'.join([od.symbol, od.exchange])
od.gatewayName = self.gatewayName
self.orderDict[orderId] = od
od.direction = directionMapReverse.get(order.action, '')
od.price = order.lmtPrice
od.totalVolume = order.totalQuantity
newod = copy(od)
self.gateway.onOrder(newod)
#----------------------------------------------------------------------
def openOrderEnd(self):
""""""
pass
#----------------------------------------------------------------------
def winError(self, str_, lastError):
""""""
pass
#----------------------------------------------------------------------
def connectionClosed(self):
"""断线"""
self.apiStatus = False
self.gateway.connected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = text.API_DISCONNECTED
self.gateway.onLog(log)
#----------------------------------------------------------------------
def updateAccountValue(self, key, val, currency, accountName):
"""更新账户数据"""
# 仅逐个字段更新数据,这里对于没有currency的推送忽略
if currency:
name = '.'.join([accountName, currency])
if name in self.accountDict:
account = self.accountDict[name]
else:
account = VtAccountData()
account.accountID = name
account.vtAccountID = name
account.gatewayName = self.gatewayName
self.accountDict[name] = account
if key in accountKeyMap:
k = accountKeyMap[key]
account.__setattr__(k, float(val))
#----------------------------------------------------------------------
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
"""持仓更新"""
pos = VtPositionData()
pos.symbol = contract.localSymbol
pos.exchange = exchangeMapReverse.get(contract.exchange, contract.exchange)
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
pos.direction = DIRECTION_NET
pos.position = position
pos.price = averageCost
pos.vtPositionName = pos.vtSymbol
pos.gatewayName = self.gatewayName
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def updateAccountTime(self, timeStamp):
"""更新账户时间"""
# 推送数据
for account in self.accountDict.values():
newaccount = copy(account)
self.gateway.onAccount(newaccount)
#----------------------------------------------------------------------
def accountDownloadEnd(self, accountName):
""""""
pass
#----------------------------------------------------------------------
def contractDetails(self, reqId, contractDetails):
"""合约查询回报"""
symbol = contractDetails.summary.localSymbol
exchange = exchangeMapReverse.get(contractDetails.summary.exchange, EXCHANGE_UNKNOWN)
vtSymbol = '.'.join([symbol, exchange])
ct = self.contractDict.get(vtSymbol, None)
if not ct:
return
ct.name = contractDetails.longName.decode('UTF-8')
ct.priceTick = contractDetails.minTick
        # Push the contract data
self.gateway.onContract(ct)
#----------------------------------------------------------------------
def bondContractDetails(self, reqId, contractDetails):
""""""
pass
#----------------------------------------------------------------------
def contractDetailsEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def execDetails(self, reqId, contract, execution):
"""成交推送"""
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.tradeID = execution.execId
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.symbol = contract.localSymbol
trade.exchange = exchangeMapReverse.get(contract.exchange, '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.orderID = str(execution.orderId)
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.direction = directionMapReverse.get(execution.side, '')
trade.price = execution.price
trade.volume = execution.shares
trade.tradeTime = execution.time
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def execDetailsEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def updateMktDepth(self, id_, position, operation, side, price, size):
""""""
pass
#----------------------------------------------------------------------
def updateMktDepthL2(self, id_, position, marketMaker, operation, side, price, size):
""""""
pass
#----------------------------------------------------------------------
def updateNewsBulletin(self, msgId, msgType, newsMessage, originExch):
""""""
pass
#----------------------------------------------------------------------
def managedAccounts(self, accountsList):
"""推送管理账户的信息"""
l = accountsList.split(',')
# 请求账户数据主推更新
for account in l:
self.reqAccountUpdates(True, account)
#----------------------------------------------------------------------
def receiveFA(self, pFaDataType, cxml):
""""""
pass
#----------------------------------------------------------------------
def historicalData(self, reqId, date, open_, high, low, close, volume, barCount, WAP, hasGaps):
""""""
pass
#----------------------------------------------------------------------
def scannerParameters(self, xml):
""""""
pass
#----------------------------------------------------------------------
def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
""""""
pass
#----------------------------------------------------------------------
def scannerDataEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def realtimeBar(self, reqId, time, open_, high, low, close, volume, wap, count):
""""""
pass
#----------------------------------------------------------------------
def fundamentalData(self, reqId, data):
""""""
pass
#----------------------------------------------------------------------
def deltaNeutralValidation(self, reqId, underComp):
""""""
pass
#----------------------------------------------------------------------
def tickSnapshotEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def marketDataType(self, reqId, marketDataType):
""""""
pass
#----------------------------------------------------------------------
def commissionReport(self, commissionReport):
""""""
pass
#----------------------------------------------------------------------
def position(self, account, contract, position, avgCost):
""""""
pass
#----------------------------------------------------------------------
def positionEnd(self):
""""""
pass
#----------------------------------------------------------------------
def verifyMessageAPI(self, apiData):
""""""
pass
#----------------------------------------------------------------------
def verifyCompleted(self, isSuccessful, errorText):
""""""
pass
#----------------------------------------------------------------------
def displayGroupList(self, reqId, groups):
""""""
pass
#----------------------------------------------------------------------
def displayGroupUpdated(self, reqId, contractInfo):
""""""
pass
#----------------------------------------------------------------------
def verifyAndAuthMessageAPI(self, apiData, xyzChallange):
""""""
pass
#----------------------------------------------------------------------
def verifyAndAuthCompleted(self, isSuccessful, errorText):
""""""
pass
#----------------------------------------------------------------------
def positionMulti(self, reqId, account, modelCode, contract, pos, avgCost):
""""""
pass
#----------------------------------------------------------------------
def positionMultiEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def accountUpdateMulti(self, reqId, account, modelCode, key, value, currency):
""""""
pass
#----------------------------------------------------------------------
def accountUpdateMultiEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def securityDefinitionOptionalParameter(self, reqId, exchange, underlyingConId, tradingClass, multiplier, expirations, strikes):
""""""
pass
#----------------------------------------------------------------------
def securityDefinitionOptionalParameterEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def softDollarTiers(self, reqId, tiers):
""""""
pass
| {
"content_hash": "5c0494a1567da04d9b64979741b3f6f3",
"timestamp": "",
"source": "github",
"line_count": 734,
"max_line_length": 166,
"avg_line_length": 35.775204359673026,
"alnum_prop": 0.4749990479454663,
"repo_name": "ujfjhz/vnpy",
"id": "a5edf45a75a8605cf5eef5bd6f812fdb40bb12dc",
"size": "28036",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "docker/dockerTrader/gateway/ibGateway/ibGateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "157"
},
{
"name": "C",
"bytes": "3611917"
},
{
"name": "C++",
"bytes": "9733727"
},
{
"name": "CMake",
"bytes": "44488"
},
{
"name": "Jupyter Notebook",
"bytes": "43109"
},
{
"name": "Makefile",
"bytes": "99693"
},
{
"name": "Objective-C",
"bytes": "143589"
},
{
"name": "Python",
"bytes": "9054593"
},
{
"name": "R",
"bytes": "1354"
},
{
"name": "Shell",
"bytes": "6857"
}
],
"symlink_target": ""
} |
import pytest
from pytest_bdd import (
scenarios,
then,
when,
)
from . import browsersteps
pytestmark = [
pytest.mark.bdd,
pytest.mark.usefixtures('workbook', 'admin_user'),
]
scenarios(
'title.feature',
'generics.feature',
'create_gene_disease.feature',
'curation_central.feature',
)
# https://github.com/pytest-dev/pytest-bdd/issues/124
@when('I visit "/<item_type>/"')
def i_visit_the_collection_for_item_type(browser, base_url, item_type):
url = '/{}/'.format(item_type)
browsersteps.when_i_visit_url(browser, base_url, url)
@when('I click the link with text that contains "<link_text>"')
def click_link_with_text_that_contains_link_text(browser, link_text):
browsersteps.click_link_with_text_that_contains(browser, link_text)
@then('I should see an element with the css selector ".view-item.type-<item_type>"')
def should_see_element_with_css_item_type(browser, item_type):
css = ".view-item.type-{}".format(item_type)
browsersteps.should_see_element_with_css(browser, css)
| {
"content_hash": "909e19833b8e81d7de8052d2a94b8b7e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 26.794871794871796,
"alnum_prop": 0.6947368421052632,
"repo_name": "philiptzou/clincoded",
"id": "ce83e14c1de34f273675bfc93a8cd32fed77d39c",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/clincoded/tests/features/test_generics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "91635"
},
{
"name": "Cucumber",
"bytes": "1888"
},
{
"name": "Groff",
"bytes": "109117722"
},
{
"name": "HTML",
"bytes": "371973"
},
{
"name": "JavaScript",
"bytes": "655240"
},
{
"name": "Makefile",
"bytes": "96"
},
{
"name": "Python",
"bytes": "589556"
},
{
"name": "Ruby",
"bytes": "1004"
},
{
"name": "Shell",
"bytes": "1669"
}
],
"symlink_target": ""
} |
import os
from wsgiref.simple_server import make_server
from webob import exc
from webob.dec import wsgify
import logging
from yubiauth.core.rest import application as core_rest
from yubiauth.client.rest import application as client_rest
from yubiauth.client.web import application as client_web
from yubiauth.util.static import DirectoryApp, FileApp
STATIC_ASSETS = ['js', 'css', 'img', 'favicon.ico']
class YubiAuthAPI(object):
def __init__(self):
base_dir = os.path.dirname(__file__)
static_dir = os.path.join(base_dir, 'static')
static_app = DirectoryApp(static_dir)
favicon_app = FileApp(os.path.join(static_dir, 'favicon.ico'))
self._apps = {
'core': core_rest,
'client': client_rest,
'ui': client_web,
'static': static_app,
'favicon.ico': favicon_app
}
@wsgify
def __call__(self, request):
base_path = request.environ.get('BASE_PATH', '/')
if not request.script_name and request.path_info.startswith(base_path):
request.script_name = base_path
request.path_info = request.path_info[len(base_path):]
app_key = request.path_info_pop()
if app_key in self._apps:
return request.get_response(self._apps[app_key])
raise exc.HTTPNotFound
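# Illustrative note, not from the original source: the first path segment selects
# the sub-application registered above, e.g. "/ui/..." is handled by the client
# web app, "/core/..." by the core REST app and "/static/..." by the static file
# app; any other prefix results in the HTTPNotFound raised in __call__.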
application = YubiAuthAPI()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
httpd = make_server('localhost', 8080, application)
httpd.serve_forever()
| {
"content_hash": "282e62145ab96779b26e8676a07bd26f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 30.56,
"alnum_prop": 0.6393979057591623,
"repo_name": "Yubico/yubiauth-dpkg",
"id": "cf96569d37c7ecbff5ee5796c415d3547444efff",
"size": "2904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yubiauth/server.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "204"
},
{
"name": "HTML",
"bytes": "10721"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Perl",
"bytes": "4286"
},
{
"name": "Python",
"bytes": "137195"
}
],
"symlink_target": ""
} |
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class ContextTestCase(test.NoDBTestCase):
# NOTE(danms): Avoid any cells setup by claiming we will
# do things ourselves.
USES_DB_SELF = True
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
is_admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual('yes', ctxt.read_deleted)
ctxt.read_deleted = 'no'
self.assertEqual('no', ctxt.read_deleted)
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual([], ctxt.service_catalog)
def test_service_catalog_filter(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': u'block-storage', u'name': u'cinder'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'block-storage', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(volume_catalog, ctxt.service_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stub_out('nova.context.LOG.warning', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(0, len(warns), warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_get_context_no_overwrite(self):
# If there is already a context in the cache creating another context
# should not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_context()
self.assertIs(ctx1, o_context.get_current())
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'is_admin': False,
'is_admin_project': True,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
for k, v in expected_values.items():
self.assertIn(k, values2)
self.assertEqual(values2[k], v)
@mock.patch.object(context.policy, 'authorize')
def test_can(self, mock_authorize):
mock_authorize.return_value = True
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule)
self.assertTrue(result)
mock_authorize.assert_called_once_with(
ctxt, mock.sentinel.rule,
{'project_id': ctxt.project_id, 'user_id': ctxt.user_id})
@mock.patch.object(context.policy, 'authorize')
def test_can_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
self.assertRaises(exception.Forbidden,
ctxt.can, mock.sentinel.rule)
@mock.patch.object(context.policy, 'authorize')
def test_can_non_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule, mock.sentinel.target,
fatal=False)
self.assertFalse(result)
mock_authorize.assert_called_once_with(ctxt, mock.sentinel.rule,
mock.sentinel.target)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell(self, mock_create_ctxt_mgr, mock_rpc):
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
# Verify the existing db_connection, if any, is restored
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
mapping = objects.CellMapping(database_connection='fake://',
transport_url='fake://',
uuid=uuids.cell)
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
self.assertIsNone(ctxt.cell_uuid)
# Test again now that we have populated the cache
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_unset(self, mock_create_ctxt_mgr, mock_rpc):
"""Tests that passing None as the mapping will temporarily
untarget any previously set cell context.
"""
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
with context.target_cell(ctxt, None) as cctxt:
self.assertIsNone(cctxt.db_connection)
self.assertIsNone(cctxt.mq_connection)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
@mock.patch('nova.context.set_target_cell')
def test_target_cell_regenerates(self, mock_set):
ctxt = context.RequestContext('fake', 'fake')
# Set a non-tracked property on the context to make sure it
# does not make it to the targeted one (like a copy would do)
ctxt.sentinel = mock.sentinel.parent
with context.target_cell(ctxt, mock.sentinel.cm) as cctxt:
# Should be a different object
self.assertIsNot(cctxt, ctxt)
# Should not have inherited the non-tracked property
self.assertFalse(hasattr(cctxt, 'sentinel'),
'Targeted context was copied from original')
# Set another non-tracked property
cctxt.sentinel = mock.sentinel.child
# Make sure we didn't pollute the original context
self.assertNotEqual(ctxt.sentinel, mock.sentinel.child)
def test_get_context(self):
ctxt = context.get_context()
self.assertIsNone(ctxt.user_id)
self.assertIsNone(ctxt.project_id)
self.assertFalse(ctxt.is_admin)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_caching(self, mock_create_cm, mock_create_tport):
mock_create_cm.return_value = mock.sentinel.db_conn_obj
mock_create_tport.return_value = mock.sentinel.mq_conn_obj
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
# First call should create new connection objects.
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_called_once_with('fake://db')
mock_create_tport.assert_called_once_with('fake://mq')
# Second call should use cached objects.
mock_create_cm.reset_mock()
mock_create_tport.reset_mock()
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_not_called()
mock_create_tport.assert_not_called()
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells(self, mock_get_inst, mock_target_cell):
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
mappings = objects.CellMappingList(objects=[mapping])
# Use a mock manager to assert call order across mocks.
manager = mock.Mock()
manager.attach_mock(mock_get_inst, 'get_inst')
manager.attach_mock(mock_target_cell, 'target_cell')
filters = {'deleted': False}
context.scatter_gather_cells(
ctxt, mappings, 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
# NOTE(melwitt): This only works without the SpawnIsSynchronous fixture
# because when the spawn is treated as synchronous and the thread
# function is called immediately, it will occur inside the target_cell
# context manager scope when it wouldn't with a real spawn.
# Assert that InstanceList.get_by_filters was called before the
# target_cell context manager exited.
get_inst_call = mock.call.get_inst(
mock_target_cell.return_value.__enter__.return_value, filters,
sort_dir='foo')
expected_calls = [get_inst_call,
mock.call.target_cell().__exit__(None, None, None)]
manager.assert_has_calls(expected_calls)
@mock.patch('nova.context.LOG.warning')
@mock.patch('eventlet.timeout.Timeout')
@mock.patch('eventlet.queue.LightQueue.get')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_timeout(self, mock_get_inst,
mock_get_result, mock_timeout,
mock_log_warning):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 not responding.
mock_get_result.side_effect = [(mapping0.uuid,
mock.sentinel.instances),
exception.CellTimeout()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.did_not_respond_sentinel, results.values())
mock_timeout.assert_called_once_with(30, exception.CellTimeout)
self.assertTrue(mock_log_warning.called)
@mock.patch('nova.context.LOG.exception')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_exception(self, mock_get_inst,
mock_log_exception):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 raising an exception.
mock_get_inst.side_effect = [mock.sentinel.instances,
test.TestingException()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.raised_exception_sentinel, results.values())
self.assertTrue(mock_log_exception.called)
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_all_cells(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, mock_get_all.return_value, 60,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_skip_cell0(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_skip_cell0(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping1], 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
def test_scatter_gather_single_cell(self, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
filters = {'deleted': False}
context.scatter_gather_single_cell(ctxt, mapping0,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping0], context.CELL_TIMEOUT,
objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
| {
"content_hash": "15771dbf282777b333123f1ec3f777a2",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 79,
"avg_line_length": 46.12978723404255,
"alnum_prop": 0.5577233522439002,
"repo_name": "gooddata/openstack-nova",
"id": "ac40f9a7cf66997e9e4364cef781d7659200a669",
"size": "22297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import unittest
import urllib2
import urllib
import json
import os
class Blah(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
#self.send_request('/dump', {"yes": True})
self.send_request('/drop', {"yes": True})
#self.send_request('/dump', {"yes": True})
def tearDown(self):
pass
def prepare_data(self, data):
json_encoded = json.dumps(data)
data = urllib.urlencode({'data': json_encoded})
return data
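    # For example (illustrative values, not from the original tests):
    #   prepare_data({"yes": True}) == "data=%7B%22yes%22%3A+true%7D"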
def send_request(self, url, data=None):
full_url = "http://localhost:3000/" + str(url)
req = None
        if data is not None:
req = urllib2.Request(full_url, self.prepare_data(data))
else:
req = urllib2.Request(full_url)
response = urllib2.urlopen(req)
the_page = response.read()
return the_page
def send_default_poop(self, rfid='123'):
result = self.send_request('/poop', {
'rfid' : rfid
})
decoded = json.loads(result)
code = ""
if ("code" in decoded):
code = decoded["code"]
print("send_default_poop: result: ", result)
return result, code
def send_default_registration(self, code):
result = self.send_request('/register', {
'username' : 'ivankadraganova',
'password' : 'password',
'email' : '[email protected]',
'code' : code
})
print("send_default_registration: result: ", result)
return result
def send_default_alive(self, slots):
result = self.send_request('/alive', {
'slots': slots,
'key': "6x9=42"
})
print("send_default_alive: result: ", result)
return result
def send_default_alive1(self):
return self.send_default_alive([1, 0, 0, 0, 0, 0, 0, 0])
def send_default_alive0(self):
return self.send_default_alive([0, 0, 0, 0, 0, 0, 0, 0])
def send_default_status(self):
result = self.send_request('/status')
print("send_default_status: result: ", result)
return result
def test_poop_returns_code(self):
result, code = self.send_default_poop()
result = json.loads(result)
code = result["code"]
self.assertTrue("code" in result)
def test_registration(self):
result, code = self.send_default_poop()
result = self.send_default_registration(code)
self.assertEqual(result, '{"status":"ok","message":"registered"}')
def test_poop_alive1(self):
result, code = self.send_default_poop()
result = self.send_default_alive1()
self.assertEqual(result, '{"status":"ok","message":"connected","slots":[1,0,0,0,0,0,0,0]}')
def test_poop_register_alive1(self):
result, code = self.send_default_poop()
result = self.send_default_registration(code);
result = self.send_default_alive1()
self.assertEqual(result, '{"status":"ok","message":"connected","slots":[1,0,0,0,0,0,0,0]}')
def test_theft(self):
result, code = self.send_default_poop()
result = self.send_default_alive1()
result = self.send_default_alive0()
self.assertEqual(result, '{"status":"error","message":"theft","slots":[2,0,0,0,0,0,0,0]}')
def test_poop_alive1_poop_alive0(self):
result, code = self.send_default_poop()
result = self.send_default_alive1()
result, code = self.send_default_poop()
result = self.send_default_alive0()
self.assertEqual(result, '{"status":"ok","message":"disconnected","slots":[0,0,0,0,0,0,0,0]}')
def test_poop_register_alive1_poop_alive0(self):
result, code = self.send_default_poop()
result = self.send_default_registration(code)
result = self.send_default_alive1()
result, code = self.send_default_poop()
result = self.send_default_alive0()
self.assertEqual(result, '{"status":"ok","message":"disconnected","slots":[0,0,0,0,0,0,0,0]}')
def test_poop_poop(self):
result, code = self.send_default_poop()
result, code = self.send_default_poop()
self.assertEqual(code, "")
def test_poop_alive1_alive0_poop_status(self):
result, code = self.send_default_poop()
result = self.send_default_alive1()
result = self.send_default_alive0()
result = self.send_default_status()
result, code = self.send_default_poop()
result = self.send_default_status()
self.assertTrue(False)
def test_drop(self):
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "197ac64fc3654c88f66ce4d2a6a9afc5",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 96,
"avg_line_length": 23.40223463687151,
"alnum_prop": 0.6653139174027214,
"repo_name": "qweqq/dwmb",
"id": "23bab4a81e95070158269bfdf8c3a23c7edca843",
"size": "4189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server_code/dwmb/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32990"
},
{
"name": "C++",
"bytes": "59517"
},
{
"name": "CSS",
"bytes": "11536"
},
{
"name": "Go",
"bytes": "8616"
},
{
"name": "HTML",
"bytes": "6587"
},
{
"name": "JavaScript",
"bytes": "8397"
},
{
"name": "KiCad",
"bytes": "438693"
},
{
"name": "Python",
"bytes": "10255"
},
{
"name": "Ruby",
"bytes": "15445"
},
{
"name": "Shell",
"bytes": "1024"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
import time
import logging
from tmdb_api import tmdb
from mutagen.mp4 import MP4, MP4Cover
from extensions import valid_output_extensions, valid_poster_extensions, tmdb_api_key
class tmdb_mp4:
def __init__(self, imdbid, tmdbid=False, original=None, language='en', logger=None):
if logger:
self.log = logger
else:
self.log = logging.getLogger(__name__)
if tmdbid:
self.log.debug("TMDB ID: %s." % tmdbid)
else:
self.log.debug("IMDB ID: %s." % imdbid)
if tmdbid is False and imdbid.startswith('tt') is not True:
imdbid = 'tt' + imdbid
self.log.debug("Correcting imdbid to %s." % imdbid)
self.imdbid = imdbid
self.original = original
for i in range(3):
try:
tmdb.configure(tmdb_api_key, language=language)
self.movie = tmdb.Movie(imdbid)
self.HD = None
self.title = self.movie.get_title()
self.genre = self.movie.get_genres()
self.shortdescription = self.movie.get_tagline()
self.description = self.movie.get_overview()
self.date = self.movie.get_release_date()
# Generate XML tags for Actors/Writers/Directors/Producers
self.xml = self.xmlTags()
break
except Exception as e:
self.log.exception("Failed to connect to tMDB, trying again in 20 seconds.")
time.sleep(20)
def writeTags(self, mp4Path, artwork=True, thumbnail=False):
self.log.info("Tagging file: %s." % mp4Path)
ext = os.path.splitext(mp4Path)[1][1:]
if ext not in valid_output_extensions:
self.log.error("File is not the correct format.")
sys.exit()
video = MP4(mp4Path)
try:
video.delete()
except IOError:
self.log.debug("Unable to clear original tags, attempting to proceed.")
video["\xa9nam"] = self.title # Movie title
video["desc"] = self.shortdescription # Short description
video["ldes"] = self.description # Long description
video["\xa9day"] = self.date # Year
video["stik"] = [9] # Movie iTunes category
if self.HD is not None:
video["hdvd"] = self.HD
if self.genre is not None:
genre = None
for g in self.genre:
if genre is None:
genre = g['name']
break
# else:
# genre += ", " + g['name']
video["\xa9gen"] = genre # Genre(s)
video["----:com.apple.iTunes:iTunMOVI"] = self.xml # XML - see xmlTags method
rating = self.rating()
if rating is not None:
video["----:com.apple.iTunes:iTunEXTC"] = rating
if artwork:
path = self.getArtwork(mp4Path)
if path is not None:
                with open(path, 'rb') as artwork_file:
                    cover = artwork_file.read()
if path.endswith('png'):
video["covr"] = [MP4Cover(cover, MP4Cover.FORMAT_PNG)] # png poster
else:
video["covr"] = [MP4Cover(cover, MP4Cover.FORMAT_JPEG)] # jpeg poster
if self.original:
video["\xa9too"] = "MDH:" + os.path.basename(self.original)
else:
video["\xa9too"] = "MDH:" + os.path.basename(mp4Path)
for i in range(3):
try:
self.log.info("Trying to write tags.")
video.save()
self.log.info("Tags written successfully.")
break
except IOError as e:
self.log.info("Exception: %s" % e)
self.log.exception("There was a problem writing the tags. Retrying.")
time.sleep(5)
def rating(self):
ratings = {'G': '100',
'PG': '200',
'PG-13': '300',
'R': '400',
'NC-17': '500'}
output = None
mpaa = self.movie.get_mpaa_rating()
if mpaa in ratings:
numerical = ratings[mpaa]
output = 'mpaa|' + mpaa.capitalize() + '|' + numerical + '|'
        if output is None:
            return None
        return str(output)
def setHD(self, width, height):
if width >= 1900 or height >= 1060:
self.HD = [2]
elif width >= 1260 or height >= 700:
self.HD = [1]
else:
self.HD = [0]
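    # Illustrative mapping, derived from the thresholds above: 1920x1080
    # yields [2] (1080p), 1280x720 yields [1] (720p), 640x480 yields [0] (SD).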
def xmlTags(self):
# constants
header = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict>\n"
castheader = "<key>cast</key><array>\n"
writerheader = "<key>screenwriters</key><array>\n"
directorheader = "<key>directors</key><array>\n"
producerheader = "<key>producers</key><array>\n"
subfooter = "</array>\n"
footer = "</dict></plist>\n"
output = StringIO()
output.write(header)
# Write actors
output.write(castheader)
for a in self.movie.get_cast()[:5]:
if a is not None:
output.write("<dict><key>name</key><string>%s</string></dict>\n" % a['name'].encode('ascii', 'ignore'))
output.write(subfooter)
# Write screenwriters
output.write(writerheader)
for w in self.movie.get_writers()[:5]:
if w is not None:
output.write("<dict><key>name</key><string>%s</string></dict>\n" % w['name'].encode('ascii', 'ignore'))
output.write(subfooter)
# Write directors
output.write(directorheader)
for d in self.movie.get_directors()[:5]:
if d is not None:
output.write("<dict><key>name</key><string>%s</string></dict>\n" % d['name'].encode('ascii', 'ignore'))
output.write(subfooter)
# Write producers
output.write(producerheader)
for p in self.movie.get_producers()[:5]:
if p is not None:
output.write("<dict><key>name</key><string>%s</string></dict>\n" % p['name'].encode('ascii', 'ignore'))
output.write(subfooter)
# Write final footer
output.write(footer)
        value = output.getvalue()
        output.close()
        return value
# end xmlTags
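    # Illustrative sketch (an assumption, abbreviated rather than generated by
    # the code above): for a movie with one cast member "Jane Doe" and one
    # director "John Roe", xmlTags() returns a plist string roughly like
    #   <?xml version="1.0" encoding="UTF-8"?><!DOCTYPE plist ...><plist version="1.0"><dict>
    #   <key>cast</key><array>
    #   <dict><key>name</key><string>Jane Doe</string></dict>
    #   </array>
    #   <key>screenwriters</key><array>
    #   </array>
    #   <key>directors</key><array>
    #   <dict><key>name</key><string>John Roe</string></dict>
    #   </array>
    #   <key>producers</key><array>
    #   </array>
    #   </dict></plist>
    # which writeTags() stores under the iTunMOVI atom.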
def getArtwork(self, mp4Path, filename='cover'):
# Check for local artwork in the same directory as the mp4
extensions = valid_poster_extensions
poster = None
for e in extensions:
head, tail = os.path.split(os.path.abspath(mp4Path))
path = os.path.join(head, filename + os.extsep + e)
if (os.path.exists(path)):
poster = path
self.log.info("Local artwork detected, using %s." % path)
break
# Pulls down all the poster metadata for the correct season and sorts them into the Poster object
if poster is None:
try:
poster = urlretrieve(self.movie.get_poster("l"), os.path.join(tempfile.gettempdir(), "poster-%s.jpg" % self.imdbid))[0]
except Exception as e:
self.log.error("Exception while retrieving poster %s.", str(e))
poster = None
return poster
def main():
if len(sys.argv) > 2:
mp4 = str(sys.argv[1]).replace("\\", "\\\\").replace("\\\\\\\\", "\\\\")
imdb_id = str(sys.argv[2])
tmdb_mp4_instance = tmdb_mp4(imdb_id)
if os.path.splitext(mp4)[1][1:] in valid_output_extensions:
tmdb_mp4_instance.writeTags(mp4)
else:
print("Wrong file type")
if __name__ == '__main__':
main()
| {
"content_hash": "02596fcae40d0da09f5b8e1b236ce8f5",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 198,
"avg_line_length": 36.77522935779817,
"alnum_prop": 0.5359860296869153,
"repo_name": "Filechaser/sickbeard_mp4_automator",
"id": "77461c1343ef88d13cafcf3d31c7103b521b23c0",
"size": "8017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tmdb_mp4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "639"
},
{
"name": "Python",
"bytes": "431318"
},
{
"name": "Shell",
"bytes": "4590"
}
],
"symlink_target": ""
} |
__all__ = ['volume_blueprint']
import os
import sys
import json
import uuid
import random
from datetime import datetime, timedelta
# dateutil
from dateutil.parser import parse as dtparse
# flask
from flask import (
Flask, request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
    Blueprint,
send_from_directory,
current_app,
)
# flask login
from flask.ext.login import login_required, fresh_login_required, current_user
# flask-wtf
from flask.ext.wtf import Form
from wtforms import validators
from wtforms import TextField, PasswordField, SelectField, BooleanField
from wtforms_html5 import EmailField
# requests
import requests
from requests.auth import HTTPBasicAuth
# model
from model.db import db
from model.db import object_to_dict, objects_to_list, update_object_with_dict
from model.user import UserAccount, UserQuota
from model.host import Host
from model.volume import Volume
from model.mount import MountPoint
volume_blueprint = Blueprint('volume_blueprint', __name__)
@volume_blueprint.route('/volumes', methods=['GET'])
@login_required
def volume_volumes():
username = current_user.username
print 'volume_volumes:', locals()
# get user account properties
user_account = UserAccount.query.filter_by(username=username).one()
dct = object_to_dict(user_account)
return render_template(
'volume-volumes.html',
**dct
)
@volume_blueprint.route('/volumes/all', methods=['POST'])
@login_required
def volume_volumes_all():
username = current_user.username
usertype = current_user.usertype
print 'volume_volumes_all:', locals()
# FIXME:
if usertype != 'super':
data = {}
return jsonify(data)
volumes = Volume.query.all()
_volumes = objects_to_list(volumes)
# insert host_name
# insert mount_point_name
for _volume in _volumes:
host = Host.query.get(_volume['host_id'])
assert host is not None
mount_point = MountPoint.query.get(_volume['mount_point_id'])
assert mount_point is not None
_volume['host_name'] = host.name
_volume['mount_point_name'] = mount_point.name
data = {
'volumes': _volumes,
}
return jsonify(data)
@volume_blueprint.route('/volume/create', methods=['POST'])
@login_required
def volume_create():
username = current_user.username
usertype = current_user.usertype
_volume = request.json['volume']
print 'volume_add:', locals()
# FIXME:
if usertype != 'super':
data = {}
return jsonify(data)
host_id = _volume.get('host_id', None)
mount_point_id = _volume.get('mount_point_id', None)
name = _volume['name']
capacity = _volume['capacity']
username_ = _volume['username']
# find available host and/or mount point
if host_id is None or mount_point_id is None:
query = MountPoint.query
if host_id is not None and mount_point_id is None:
query = query.filter_by(host_id=host_id)
mount_points = query.all()
mount_points = [
m for m in mount_points
if m.capacity - m.reserved >= capacity
]
# no mount_points available
if not mount_points:
data = {
                'error': 'There is no available space. '\
'Try smaller volume capacity than %s GB.' % capacity,
}
return jsonify(data)
# take first available slice
mount_points.sort(key=lambda m: m.capacity - m.reserved)
mount_point = random.choice(mount_points)
# host, mount_point
host_id = mount_point.host_id
mount_point_id = mount_point.id
# host_name
host = Host.query.get(host_id)
assert host is not None
# mount_point_name
mount_point = MountPoint.query.get(mount_point_id)
assert mount_point is not None
if mount_point.capacity - mount_point.reserved < capacity:
data = {
            'error': 'There is no available space. '\
'Try smaller volume capacity than %s GB.' % capacity,
}
return jsonify(data)
# increase reserved storage at mount point
mount_point.reserved = mount_point.reserved + capacity
# insert volume into database
__volume = {
'host_id': host_id,
'mount_point_id': mount_point_id,
'name': name,
'capacity': capacity,
'username': username_,
}
__volume['created'] = __volume['updated'] = datetime.utcnow()
__volume['perm_name'] = perm_name = '%s_%s' % (username_, uuid.uuid4().hex)
volume = Volume(**__volume)
db.session.add(volume)
##
# create volume at host
url = 'http://%s:%i/dockyard/volume/create' % (host.host, host.port)
data_ = json.dumps({
'mountpoint': mount_point.mountpoint,
'name': perm_name,
'size': capacity,
})
headers = {
'content-type': 'application/json',
}
auth = HTTPBasicAuth(host.auth_username, host.auth_password)
r = requests.post(url, data=data_, headers=headers, auth=auth)
assert r.status_code == 200
##
db.session.commit()
# return response
__volume = object_to_dict(volume)
# insert host_name
# insert mount_point_name
__volume['host_name'] = host.name
__volume['mount_point_name'] = mount_point.name
data = {
'volume': __volume,
}
return jsonify(data)
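# Illustrative request body (an assumption inferred from the fields read
# above, not taken from project documentation): a client would POST JSON like
#   {"volume": {"name": "data01", "capacity": 10, "username": "alice"}}
# to /volume/create; host_id and mount_point_id may be omitted, in which case
# a mount point with enough unreserved capacity is chosen automatically.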
@volume_blueprint.route('/volume/update', methods=['POST'])
@login_required
def volume_update():
username = current_user.username
usertype = current_user.usertype
_volume = request.json['volume']
print 'volume_update:', locals()
# FIXME:
if usertype != 'super':
data = {}
return jsonify(data)
volume = Volume.query.get(_volume['id'])
_volume['updated'] = datetime.utcnow()
update_object_with_dict(volume, _volume)
db.session.commit()
_volume = object_to_dict(volume)
# insert host_name
# insert mount_point_name
host = Host.query.get(_volume['host_id'])
assert host is not None
mount_point = MountPoint.query.get(_volume['mount_point_id'])
assert mount_point is not None
_volume['host_name'] = host.name
_volume['mount_point_name'] = mount_point.name
data = {
'volume': _volume,
}
return jsonify(data)
@volume_blueprint.route('/volume/remove', methods=['POST'])
@login_required
def volume_remove():
username = current_user.username
usertype = current_user.usertype
id = request.json['id']
print 'volume_remove:', locals()
# FIXME:
if usertype != 'super':
data = {}
return jsonify(data)
# volume
volume = Volume.query.get(id)
assert volume is not None
# host
host = Host.query.get(volume.host_id)
assert host is not None
# mount point
mount_point = MountPoint.query.get(volume.mount_point_id)
assert mount_point is not None
    # decrease reserved storage at mount point
mount_point.reserved = mount_point.reserved - volume.capacity
##
perm_name = volume.perm_name
# delete volume at host
url = 'http://%s:%i/dockyard/volume/delete' % (host.host, host.port)
data_ = json.dumps({
'mountpoint': mount_point.mountpoint,
'name': perm_name,
})
headers = {
'content-type': 'application/json',
}
auth = HTTPBasicAuth(host.auth_username, host.auth_password)
r = requests.post(url, data=data_, headers=headers, auth=auth)
assert r.status_code == 200
##
# delete volume
db.session.delete(volume)
db.session.commit()
data = {}
return jsonify(data)
| {
"content_hash": "26434a6b338a891f232fc634b78275c6",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 79,
"avg_line_length": 26.511627906976745,
"alnum_prop": 0.606516290726817,
"repo_name": "mtasic85/dockyard",
"id": "e9cc0fd13c90adbc03c015c8e94578a59581192b",
"size": "8004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "volume.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "254778"
},
{
"name": "JavaScript",
"bytes": "1337486"
},
{
"name": "Python",
"bytes": "96781"
}
],
"symlink_target": ""
} |
import argparse
import os
import ssh_facade.logging
import ssh_facade.utils
__all__ = ['parameters']
@ssh_facade.utils.memoize
def parameters():
parser = argparse.ArgumentParser(description='SSH-Facade')
parser.add_argument(
'-i', '--identity', required=False,
help='Path to identity file.')
result = parser.parse_args()
_check_identity_file(result.identity)
return result
def _check_identity_file(identity):
if identity is None:
return
identity_path = os.path.expanduser(identity)
ssh_facade.logging.info(
__name__, 'checking identity file at "%s"', identity_path)
if not os.path.exists(identity_path):
ssh_facade.logging.info(__name__, 'file does not exist: %s', identity_path)
raise ValueError('file does not exist: {}'.format(identity_path))
ssh_facade.logging.info(
__name__, 'identity file available at "%s"', identity_path)
parameters()
| {
"content_hash": "d687705ba0dcc77f49ce1680c9a56473",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 83,
"avg_line_length": 23.85,
"alnum_prop": 0.660377358490566,
"repo_name": "rremizov/ssh-facade",
"id": "4085f3eb46443f70f69b0a23f59935abcbc0fa40",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ssh_facade/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15708"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 406b874c9e9f
Revises: 09893098abbe
Create Date: 2017-09-14 23:53:38.438189
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '406b874c9e9f'
down_revision = '09893098abbe'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('discovery', sa.Column('discovery_title', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('discovery', 'discovery_title')
# ### end Alembic commands ###
| {
"content_hash": "f96a35c1de219e1e7c431ffdef89599b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 24.035714285714285,
"alnum_prop": 0.6894502228826151,
"repo_name": "ahoarfrost/metaseek",
"id": "97bce6ae17dd33f8560763c2cff7c60e52bfd623",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/migrations/versions/406b874c9e9f_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47015"
},
{
"name": "HTML",
"bytes": "14749"
},
{
"name": "JavaScript",
"bytes": "153126"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "245103"
}
],
"symlink_target": ""
} |
"""Identity v2 Service Catalog action implementations"""
import logging
from cliff import columns as cliff_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class EndpointsColumn(cliff_columns.FormattableColumn):
def human_readable(self):
if not self._value:
return ""
ret = ''
for ep in self._value:
region = ep.get('region')
if region is None:
region = '<none>'
ret += region + '\n'
for endpoint_type in ['publicURL', 'internalURL', 'adminURL']:
url = ep.get(endpoint_type)
if url:
ret += " %s: %s\n" % (endpoint_type, url)
return ret
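    # Illustrative example (an assumption, not from the original source): for
    # a catalog entry such as
    #   [{'region': 'RegionOne',
    #     'publicURL': 'http://10.0.0.1:5000/v2.0',
    #     'internalURL': 'http://192.168.0.1:5000/v2.0'}]
    # human_readable() returns roughly
    #   RegionOne
    #       publicURL: http://10.0.0.1:5000/v2.0
    #       internalURL: http://192.168.0.1:5000/v2.0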
class ListCatalog(command.Lister):
_description = _("List services in the service catalog")
def take_action(self, parsed_args):
# Trigger auth if it has not happened yet
auth_ref = self.app.client_manager.auth_ref
if not auth_ref:
raise exceptions.AuthorizationFailure(
"Only an authorized user may issue a new token."
)
data = auth_ref.service_catalog.catalog
columns = ('Name', 'Type', 'Endpoints')
return (columns,
(utils.get_dict_properties(
s, columns,
formatters={
'Endpoints': EndpointsColumn,
},
) for s in data))
class ShowCatalog(command.ShowOne):
_description = _("Display service catalog details")
def get_parser(self, prog_name):
parser = super(ShowCatalog, self).get_parser(prog_name)
parser.add_argument(
'service',
metavar='<service>',
help=_('Service to display (type or name)'),
)
return parser
def take_action(self, parsed_args):
# Trigger auth if it has not happened yet
auth_ref = self.app.client_manager.auth_ref
if not auth_ref:
raise exceptions.AuthorizationFailure(
"Only an authorized user may issue a new token."
)
data = None
for service in auth_ref.service_catalog.catalog:
if (service.get('name') == parsed_args.service or
service.get('type') == parsed_args.service):
data = service.copy()
data['endpoints'] = EndpointsColumn(data['endpoints'])
if 'endpoints_links' in data:
data.pop('endpoints_links')
break
if not data:
LOG.error(_('service %s not found\n'), parsed_args.service)
return ((), ())
return zip(*sorted(data.items()))
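# Illustrative CLI usage (the command names are inferred from the class names
# above, not stated in this module): ListCatalog and ShowCatalog back the
# `openstack catalog list` and `openstack catalog show <service>` commands,
# e.g.
#   $ openstack catalog show identity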
| {
"content_hash": "b92f576fd5f71fe121eebb18d12ac27c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 74,
"avg_line_length": 31.12087912087912,
"alnum_prop": 0.5526129943502824,
"repo_name": "openstack/python-openstackclient",
"id": "05d0e9aebc55f746b14f34efa4ab6256f6b9a24b",
"size": "3398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstackclient/identity/v2_0/catalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "923"
},
{
"name": "Python",
"bytes": "5016301"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
"""
Contains integration tests for the `api` app.
"""
from datetime import datetime
from pytz import utc
import pytest
from django.contrib.auth.models import User
from utils.test_helpers import solr_test_profiles as tp
# FIXTURES AND TEST DATA
# ---------------------------------------------------------------------
# External fixtures used below can be found in
# django/sierra/conftest.py:
# api_solr_env
# basic_solr_assembler
# api_client
# pick_reference_object_having_link
# assert_obj_fields_match_serializer
# get_linked_view_and_objects
# assemble_test_records
# do_filter_search
# get_found_ids
# apiuser_with_custom_defaults
# API_ROOT: Base URL for the API we're testing.
API_ROOT = '/api/v1/'
# RESOURCE_METADATA: Lookup dict for mapping API resources to various
# parameters for setting up tests.
RESOURCE_METADATA = {
'bibs': {
'profile': 'bib',
'id_field': 'record_number',
'links': { 'items': 'items' }
},
'items': {
'profile': 'item',
'id_field': 'record_number',
'links': { 'bibs': 'parentBib', 'locations': 'location',
'itemtypes': 'itemtype', 'itemstatuses': 'itemstatus' }
},
'eresources': {
'profile': 'eresource',
'id_field': 'record_number',
'links': None
},
'itemstatuses': {
'profile': 'itemstatus',
'id_field': 'code',
'links': { 'items': 'items' }
},
'itemtypes': {
'profile': 'itype',
'id_field': 'code',
'links': { 'items': 'items' }
},
'locations': {
'profile': 'location',
'id_field': 'code',
'links': { 'items': 'items' }
}
}
# PARAMETERS__* constants contain parametrization data for certain
# tests. Each should be a tuple, where the first tuple member is a
# header string that describes the parametrization values (such as
# what you'd pass as the first arg to pytest.mark.parametrize); the
# others are single-entry dictionaries where the key is the parameter-
# list ID (such as what you'd pass to pytest.mark.parametrize via its
# `ids` kwarg) and the value is the list of parameters for that ID.
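# A minimal sketch (an assumption; the real plumbing lives in the conftest
# fixtures listed above) of how one of these PARAMETERS__* constants could be
# unpacked into the pieces that pytest.mark.parametrize expects:
def _compile_params(parameters):
    """Split a PARAMETERS__* tuple into parametrize argnames, ids, values."""
    argnames, entries = parameters[0], parameters[1:]
    ids, argvalues = [], []
    for entry in entries:
        for test_id, params in entry.items():
            ids.append(test_id)
            argvalues.append(params)
    return argnames, ids, argvalues
# e.g., a test module could then write (hypothetical usage):
#   argnames, ids, argvalues = _compile_params(PARAMETERS__FILTER_TESTS__INTENDED)
#   @pytest.mark.parametrize(argnames, argvalues, ids=ids)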
# PARAMETERS__FILTER_TESTS__INTENDED: Parameters for testing API filter
# behavior that works as intended. Each provided `search` query string
# matches the `test_data` record(s) it is supposed to match.
PARAMETERS__FILTER_TESTS__INTENDED = (
'resource, test_data, search, expected',
# EXACT (`exact`) filters should match exactly the text or value
# passed to them. This is the default operator, if the client does
# not specify one.
{ 'exact text (bibs/creator) | no operator specified => exact match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator=Person, Test A. 1900-', ['TEST1'])
}, { 'exact text (bibs/creator) | one match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=Person, Test B. 1900-', ['TEST2']),
}, { 'exact text (bibs/creator) | multiple matches':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test A. 1900-'}),
('TEST3', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=Person, Test A. 1900-', ['TEST1', 'TEST2']),
}, { 'exact text (bibs/creator) | no matches':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=Test A. Person', None),
}, { 'exact text (bibs/creator) | negated, one match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[-exact]=Person, Test B. 1900-', ['TEST1']),
}, { 'exact string (locations/label) | one match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[exact]=TEST LABEL 1', ['TEST1']),
}, { 'exact string (locations/label) | multiple matches':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
('TEST3', {'label': 'TEST LABEL 1'}),
('TEST4', {'label': 'TEST LABEL 2'}),
), 'label[exact]=TEST LABEL 2', ['TEST2', 'TEST4']),
}, { 'exact string (locations/label) | case does not match: no match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[exact]=Test Label 2', None),
}, { 'exact string (locations/label) | punct. does not match: no match':
('locations', (
('TEST1', {'label': 'TEST-LABEL 1'}),
('TEST2', {'label': 'TEST-LABEL 2'}),
), 'label[exact]=TEST LABEL 2', None),
}, { 'exact string (locations/label) | negated, one match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[-exact]=TEST LABEL 1', ['TEST2']),
}, { 'exact int (items/copy_number) | one match':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
), 'copyNumber[exact]=54', ['TEST1']),
}, { 'exact int (items/copy_number) | multiple matches':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
('TEST3', {'copy_number': 54}),
('TEST4', {'copy_number': 12}),
), 'copyNumber[exact]=54', ['TEST1', 'TEST3']),
}, { 'exact int (items/copy_number) | no matches':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
), 'copyNumber[exact]=543', None),
}, { 'exact int (items/copy_number) | negated, one match':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
), 'copyNumber[-exact]=54', ['TEST2']),
}, { 'exact date (items/due_date) | one match':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[exact]=2018-11-30T05:00:00Z', ['TEST1']),
}, { 'exact date (items/due_date) | multiple matches':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST3', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[exact]=2018-11-30T05:00:00Z', ['TEST1', 'TEST2']),
}, { 'exact date (items/due_date) | no matches':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[exact]=1990-01-01T08:00:00Z', None),
}, { 'exact date (items/due_date) | negated, one match':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[-exact]=2018-11-30T05:00:00Z', ['TEST2']),
}, { 'exact bool (bibs/suppressed) | one match':
('bibs', (
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': False}),
), 'suppressed[exact]=true', ['TEST1']),
}, { 'exact bool (bibs/suppressed) | multiple matches':
('bibs', (
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': False}),
('TEST3', {'suppressed': False}),
), 'suppressed[exact]=false', ['TEST2', 'TEST3']),
}, { 'exact bool (bibs/suppressed) | no matches':
('bibs', (
('TEST1', {'suppressed': False}),
('TEST2', {'suppressed': False}),
), 'suppressed[exact]=true', None),
}, { 'exact bool (bibs/suppressed) | negated, one match':
('bibs', (
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': False}),
), 'suppressed[-exact]=true', ['TEST2']),
},
# Note that we don't do extensive testing on multi-valued fields.
# For any operator, only one of the multiple values in a given
# field must match for the record to match.
{ 'exact multi (bibs/sudoc_numbers) | only 1 value must match':
('bibs', (
('TEST1', {'sudoc_numbers': ['Sudoc 1', 'Sudoc 2']}),
('TEST2', {'sudoc_numbers': ['Sudoc 3']}),
), 'sudocNumbers[exact]=Sudoc 1', ['TEST1']),
},
# STRING OPERATORS: `contains`, `startswith`, `endswith`, and
# `matches`. These operators only work 100% correctly with string
# fields. Due to tokenization during indexing, text fields behave
# as though they are multi-valued fields containing individual
# words, not complete strings. So, with text fields, you can filter
# by what's in a single word, but you can't filter across multiple
# words. Normalization (e.g. removing punctuation) affects things,
# as well. Cases where filtering a text field does return what
# you'd expect are here, but PARAMETERS__FILTER_TESTS__STRANGE
# contains test cases that demonstrate the odd behavior. Dates,
# integers, and boolean values don't work with string operators.
# CONTAINS (`contains`) should return records where the query text
# appears inside the field value, like a LIKE "%text%" SQL query.
{ 'contains text (bibs/creator) | one word, no punct.':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[contains]=A', ['TEST1']),
}, { 'contains text (bibs/creator) | partial word, numeric':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
('TEST3', {'creator': 'Person, Test C. 2010-'}),
), 'creator[contains]=90', ['TEST1', 'TEST2']),
}, { 'contains text (bibs/creator) | non-matching word: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[contains]=Persona', None),
}, { 'contains text (bibs/creator) | negated, one word, no punct.':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[-contains]=A', ['TEST2']),
}, { 'contains string (locations/label) | full match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[contains]=TEST LABEL 1', ['TEST1']),
}, { 'contains string (locations/label) | multiple words, partial':
('locations', (
('TEST1', {'label': 'TEST LABEL 1-1'}),
('TEST2', {'label': 'TEST LABEL 2-2'}),
), 'label[contains]=BEL 1-', ['TEST1']),
}, { 'contains string (locations/label) | multiple words, complete':
('locations', (
('TEST1', {'label': 'TEST LABEL 1-1'}),
('TEST2', {'label': 'TEST LABEL 2-2'}),
), 'label[contains]=LABEL 1-1', ['TEST1']),
}, { 'contains string (locations/label) | single word, partial':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[contains]=LAB', ['TEST1', 'TEST2']),
}, { 'contains string (locations/label) | single word, complete':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[contains]=LABEL', ['TEST1', 'TEST2']),
}, { 'contains string (locations/label) | non-adjacent words: no match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[contains]=TEST 1', None),
}, { 'contains string (locations/label) | negated':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[-contains]=LABEL 1', ['TEST2']),
},
# STARTS WITH (`startswith`) returns records where the beginning of
# the field value exactly matches the query text. Equivalent to a
# LIKE "text%" SQL query.
{ 'startswith text (bibs/creator) | one word, no punct.':
('bibs', (
('TEST1', {'creator': 'Per, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[startswith]=Person', ['TEST2']),
}, { 'startswith text (bibs/creator) | partial word':
('bibs', (
('TEST1', {'creator': 'Per, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[startswith]=Per', ['TEST1', 'TEST2']),
}, { 'startswith text (bibs/creator) | negated':
('bibs', (
('TEST1', {'creator': 'Per, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[-startswith]=Person', ['TEST1']),
}, { 'startswith string (locations/label) | full match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[startswith]=TEST LABEL 1', ['TEST1']),
}, { 'startswith string (locations/label) | partial match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[startswith]=TEST LAB', ['TEST1', 'TEST2']),
}, { 'startswith string (locations/label) | start mid-string: no match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[startswith]=LABEL 1', None),
}, { 'startswith string (locations/label) | partial non-match: no match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[startswith]=TEST LB', None),
}, { 'startswith string (locations/label) | negated':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[-startswith]=TEST 1', ['TEST2']),
},
# ENDS WITH (`endswith`) returns records where the end of the field
# value exactly matches the query text. Equivalent to a LIKE
# "%text" SQL query.
{ 'endswith text (bibs/creator) | one word, no punct.':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[endswith]=Beta', ['TEST2']),
}, { 'endswith text (bibs/creator) | partial word':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[endswith]=pha', ['TEST1']),
}, { 'endswith text (bibs/creator) | negated':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[-endswith]=Beta', ['TEST1']),
}, { 'endswith string (locations/label) | full match':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[endswith]=TEST 1 LABEL', ['TEST1']),
}, { 'endswith string (locations/label) | partial match':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[endswith]=1 LABEL', ['TEST1']),
}, { 'endswith string (locations/label) | end mid-string: no match':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[endswith]=1 LAB', None),
}, { 'endswith string (locations/label) | partial non-match: no match':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[endswith]=3 LABEL', None),
}, { 'endswith string (locations/label) | negated':
('locations', (
('TEST1', {'label': 'TEST 1 LABEL'}),
('TEST2', {'label': 'TEST 2 LABEL'}),
), 'label[-endswith]=1 LABEL', ['TEST2']),
},
# MATCHES (`matches`) treats the query text as a regular expression
# and attempts to find field values matching the regex. This is
    # still vaguely experimental--it isn't used in any of the production
# systems that use the Catalog API, and it relies on Solr's regex
# implementation, which is a little quirky. (Plus we're still using
# an old version of Solr.) So, these tests aren't exhaustive--they
# just demonstrate some of the things that do work.
{ 'matches text (bibs/creator) | match on a single word':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[matches]=.[Ee]ta', ['TEST2']),
}, { 'matches text (bibs/creator) | match using pipe (e.g., or)':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[matches]=(Alpha|Beta)', ['TEST1', 'TEST2']),
}, { 'matches text (bibs/creator) | negated':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[-matches]=.[Ee]ta', ['TEST1']),
}, { 'matches string (locations/label) | ^ matches start of string':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'SECOND TEST LABEL'}),
), 'label[matches]=^TEST LABEL', ['TEST1']),
}, { 'matches string (locations/label) | $ matches end of string':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'SECOND TEST LABEL'}),
), 'label[matches]=TEST LABEL$', ['TEST2']),
}, { 'matches string (locations/label) | complex multi-word regex':
('locations', (
('TEST1', {'label': 'TEST LAB 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[matches]=LAB(EL)? (1|2)$', ['TEST1', 'TEST2']),
}, { 'matches string (locations/label) | no ^$ anchors':
('locations', (
('TEST1', {'label': 'TESTING LAB 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[matches]=TEST(ING)? LAB', ['TEST1', 'TEST2']),
},
# KEYWORDS (`keywords`) is the only filter meant to be used mainly
# with text fields. Essentially it just passes your query directly
# to Solr, limited to whatever field you query, wrapped in
# parentheses. Something like:
# creator[keywords]="william shakespeare" OR "shakespeare, william"
# is passed to Solr as:
# fq=creator:("william shakespeare" OR "shakespeare, william")
# Exact behavior of course depends on how the Solr text field is
# set up (with what indexing processes, etc.). These tests show
# that standard boolean keyword search behavior works as expected.
{ 'keywords text (bibs/creator) | single kw match':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[keywords]=Alpha', ['TEST1']),
}, { 'keywords text (bibs/creator) | multiple kw matches':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[keywords]=Test Person Alpha', ['TEST1', 'TEST2']),
}, { 'keywords text (bibs/creator) | kw phrase match':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[keywords]="Test Alpha"', ['TEST1']),
}, { 'keywords text (bibs/creator) | kw phrase, wrong order: no matches':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[keywords]="Test Person Alpha"', None),
}, { 'keywords text (bibs/creator) | kw match is case insensitive':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[keywords]="test alpha"', ['TEST1']),
}, { 'keywords text (bibs/creator) | kw boolean AND':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Susan B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=test AND person AND alpha', ['TEST1']),
}, { 'keywords text (bibs/creator) | kw boolean OR':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Susan B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=person OR smith', ['TEST1', 'TEST2', 'TEST3']),
}, { 'keywords text (bibs/creator) | kw parenthetical groups':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Susan B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=baker OR (test AND alpha)', ['TEST1', 'TEST4']),
}, { 'keywords text (bibs/creator) | kw phrase with non-phrase':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Susan B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]="test alpha" smith', ['TEST1', 'TEST3']),
}, { 'keywords text (bibs/creator) | right truncation':
('bibs', (
('TEST1', {'creator': 'Per, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Sonia B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=per*', ['TEST1', 'TEST2']),
}, { 'keywords text (bibs/creator) | left truncation':
('bibs', (
('TEST1', {'creator': 'Per, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Sonia B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=*son', ['TEST2']),
}, { 'keywords text (bibs/creator) | left and right truncation':
('bibs', (
('TEST1', {'creator': 'Per, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
('TEST3', {'creator': 'Smith, Sonia B.'}),
('TEST4', {'creator': 'Baker, Joseph'}),
), 'creator[keywords]=*so*', ['TEST2', 'TEST3']),
}, { 'keywords text (bibs/creator) | negated':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[-keywords]=Alpha', ['TEST2']),
},
# NUMERIC OPERATORS: `gt`, `gte`, `lt`, `lte`, and `range`. These
# work with integers, dates, and also strings. The best example of
# a string field where these come in handy is call numbers, such as
# filtering by call number range. HOWEVER, call numbers are special
# because they don't strictly behave like strings and need to be
# normalized to force proper behavior. E.g., MT 20 < MT 100 -- but
# if not normalized to enforce that, MT 20 > MT 100 as a plain
# string. (Call numbers are therefore NOT used in the below tests.)
# GREATER THAN [OR EQUAL] (`gt`, `gte`)
# LESS THAN [OR EQUAL] (`lt`, `lte`)
# Return results where the value in the queried field is > (gt),
# >= (gte), < (lt), or <= (lte) the query value.
# Strings are compared like strings, from left to right:
# "20" > "100"; "100" < "20"; "BC" > "ABC"; "ABC" < "BC".
{ 'gt int (items/copy_number) | field val > query val':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[gt]=52', ['TEST53', 'TEST54', 'TEST55']),
}, { 'gte int (items/copy_number) | field val >= query val':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[gte]=52', ['TEST52', 'TEST53', 'TEST54', 'TEST55']),
}, { 'lt int (items/copy_number) | field val < query val':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[lt]=52', ['TEST50', 'TEST51']),
}, { 'lte int (items/copy_number) | field val <= query val':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[lte]=52', ['TEST50', 'TEST51', 'TEST52']),
}, { 'gt int (items/copy_number) | negated':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[-gt]=52', ['TEST50', 'TEST51', 'TEST52']),
}, { 'gte int (items/copy_number) | negated':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[-gte]=52', ['TEST50', 'TEST51']),
}, { 'lt int (items/copy_number) | negated':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[-lt]=52', ['TEST52', 'TEST53', 'TEST54', 'TEST55']),
}, { 'lte int (items/copy_number) | negated':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[-lte]=52', ['TEST53', 'TEST54', 'TEST55']),
}, { 'gt date (items/due_date) | field val > query val':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 10, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 16, 0, 0,
tzinfo=utc)}),
            ('TEST3', {'due_date': datetime(2018, 12, 1, 10, 0, 0,
tzinfo=utc)}),
            ('TEST4', {'due_date': datetime(2018, 12, 2, 12, 0, 0,
tzinfo=utc)}),
), 'dueDate[gt]=2018-11-30T16:00:00Z', ['TEST3', 'TEST4']),
}, { 'gte date (items/due_date) | field val >= query val':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 10, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 16, 0, 0,
tzinfo=utc)}),
            ('TEST3', {'due_date': datetime(2018, 12, 1, 10, 0, 0,
tzinfo=utc)}),
            ('TEST4', {'due_date': datetime(2018, 12, 2, 12, 0, 0,
tzinfo=utc)}),
), 'dueDate[gte]=2018-11-30T16:00:00Z', ['TEST2', 'TEST3', 'TEST4']),
}, { 'lt date (items/due_date) | field val < query val':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 10, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 16, 0, 0,
tzinfo=utc)}),
            ('TEST3', {'due_date': datetime(2018, 12, 1, 10, 0, 0,
tzinfo=utc)}),
            ('TEST4', {'due_date': datetime(2018, 12, 2, 12, 0, 0,
tzinfo=utc)}),
), 'dueDate[lt]=2018-11-30T16:00:00Z', ['TEST1']),
}, { 'lte date (items/due_date) | field val <= query val':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 10, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 16, 0, 0,
tzinfo=utc)}),
            ('TEST3', {'due_date': datetime(2018, 12, 1, 10, 0, 0,
tzinfo=utc)}),
            ('TEST4', {'due_date': datetime(2018, 12, 2, 12, 0, 0,
tzinfo=utc)}),
), 'dueDate[lte]=2018-11-30T16:00:00Z', ['TEST1', 'TEST2']),
}, { 'gt string (locations/label) | numeric strings':
('locations', (
('TEST1', {'label': 'A 1'}),
('TEST10', {'label': 'A 10'}),
('TEST2', {'label': 'A 2'}),
('TEST20', {'label': 'A 20'}),
), 'label[gt]=A 10', ['TEST2', 'TEST20']),
}, { 'gt string (locations/label) | alphabet strings':
('locations', (
('TEST_A1', {'label': 'A 1'}),
('TEST_A10', {'label': 'A 10'}),
('TEST_B1', {'label': 'B 1'}),
('TEST_B10', {'label': 'B 10'}),
), 'label[gt]=A 10', ['TEST_B1', 'TEST_B10']),
}, { 'gt string (locations/label) | something > nothing':
('locations', (
('TEST_A', {'label': 'A'}),
('TEST_AB', {'label': 'AB'}),
('TEST_ABC', {'label': 'ABC'}),
('TEST_ABCD', {'label': 'ABCD'}),
), 'label[gt]=AB', ['TEST_ABC', 'TEST_ABCD']),
}, { 'gt string (locations/label) | alphanumeric characters > formatting':
('locations', (
('TEST_A1', {'label': 'A-1'}),
('TEST_A2', {'label': 'A-2'}),
('TEST_AA', {'label': 'AA'}),
('TEST_AA1', {'label': 'AA-1'}),
), 'label[gt]=A-2', ['TEST_AA', 'TEST_AA1']),
}, { 'gte string (locations/label) | field val >= query val':
('locations', (
('TEST1', {'label': 'A 1'}),
('TEST10', {'label': 'A 10'}),
('TEST2', {'label': 'A 2'}),
('TEST20', {'label': 'A 20'}),
), 'label[gte]=A 10', ['TEST10', 'TEST2', 'TEST20']),
}, { 'lt string (locations/label) | field val < query val':
('locations', (
('TEST1', {'label': 'A 1'}),
('TEST10', {'label': 'A 10'}),
('TEST2', {'label': 'A 2'}),
('TEST20', {'label': 'A 20'}),
), 'label[lt]=A 10', ['TEST1']),
}, { 'lte string (locations/label) | field val <= query val':
('locations', (
('TEST1', {'label': 'A 1'}),
('TEST10', {'label': 'A 10'}),
('TEST2', {'label': 'A 2'}),
('TEST20', {'label': 'A 20'}),
), 'label[lte]=A 10', ['TEST1', 'TEST10']),
},
# OPERATORS THAT TAKE ARRAYS: The next two operators we're testing
# take arrays as arguments: `range` and `in`. Arrays are comma-
# separated lists of values that are surrounded in square brackets,
# such as: [1,2,3]. There are a few things to note about our array
# syntax.
# * Quotation marks can be used to surround any values, but they
# are optional. If used, any commas appearing between the quotation
# marks are interpreted literally, not as value separators. (Like
# most CSV syntaxes.) E.g.: ["Smith, James","Jones, Susan"] is an
# array containing two values, each of which contains a comma.
# * A backslash character can be used to escape commas you want to
# use literally (instead of using the quotation mark syntax). E.g.:
# [Smith\, James, Jones\, Susan] is equivalent to the above.
# * A backslash character escapes a quotation mark you need to use
# literally in the query. [A book about \"something\"] (includes
# the quotation marks as part of the query).
# * Spaces included after commas are interpreted literally. E.g.,
# with the array [1, 2, 3], the second value is " 2" and the third
# is " 3".
# RANGE (`range`) takes an array of two values -- [start,end] --
# and returns results where the value in the queried field is in
# the provided range. The range filter is inclusive: [1,3] matches
# both 1 and 3 (and the range of values between).
{ 'range int (items/copy_number) | multi-value range':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[range]=[52,54]', ['TEST52', 'TEST53', 'TEST54']),
}, { 'range int (items/copy_number) | single-value range':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[range]=[52,52]', ['TEST52']),
}, { 'range int (items/copy_number) | non-matching range: no matches':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[range]=[90,100]', None),
}, { 'range int (items/copy_number) | negated':
('items', (
('TEST50', {'copy_number': 50}),
('TEST51', {'copy_number': 51}),
('TEST52', {'copy_number': 52}),
('TEST53', {'copy_number': 53}),
('TEST54', {'copy_number': 54}),
('TEST55', {'copy_number': 55}),
), 'copyNumber[-range]=[52,54]', ['TEST50', 'TEST51', 'TEST55']),
}, { 'range date (items/due_date) | multi-value range':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 10, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 11, 30, 16, 0, 0,
tzinfo=utc)}),
            ('TEST3', {'due_date': datetime(2018, 12, 1, 10, 0, 0,
tzinfo=utc)}),
            ('TEST4', {'due_date': datetime(2018, 12, 2, 12, 0, 0,
tzinfo=utc)}),
), 'dueDate[range]=[2018-11-30T16:00:00Z,2018-12-02T12:00:00Z]',
['TEST2', 'TEST3', 'TEST4']),
}, { 'range string (locations/label) | multi-value range':
('locations', (
('TEST1', {'label': 'A 1'}),
('TEST10', {'label': 'A 10'}),
('TEST2', {'label': 'A 2'}),
('TEST20', {'label': 'A 20'}),
), 'label[range]=[A 1,A 2]', ['TEST1', 'TEST10', 'TEST2']),
},
# IN (`in`) takes an array of values and tries to find records
# where the queried field value exactly matches one of the values
# in the array. Equivalent to an SQL IN query. It works with all
# field types, although it shares the `exact` operator's issues
# with text fields, and querying boolean fields with IN doesn't
# make any sense.
{ 'in text (bibs/creator) | one match':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': 'Person, Test C'}),
), 'creator[in]=["Person, Test A","Person, Test D"]', ['TEST1'])
}, { 'in text (bibs/creator) | multiple matches':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': 'Person, Test C'}),
), 'creator[in]=["Person, Test A","Person, Test C"]',
['TEST1', 'TEST3'])
}, { 'in string (locations/label) | one match':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[in]=[TEST LABEL 1,TEST LABEL 3]', ['TEST1']),
}, { 'in string (locations/label) | multiple matches':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[in]=[TEST LABEL 1,TEST LABEL 2]', ['TEST1', 'TEST2']),
}, { 'in string (locations/label) | escape quotation marks and commas':
('locations', (
('TEST1', {'label': 'TEST "LABEL" 1'}),
('TEST2', {'label': 'TEST "LABEL" 2'}),
('TEST3', {'label': 'TEST, 3'}),
), 'label[in]=[TEST \\"LABEL\\" 1,"TEST \\"LABEL\\" 2",TEST\\, 3]',
['TEST1', 'TEST2', 'TEST3']),
}, { 'in string (locations/label) | negated':
('locations', (
('TEST1', {'label': 'TEST LABEL 1'}),
('TEST2', {'label': 'TEST LABEL 2'}),
), 'label[-in]=[TEST LABEL 1,TEST LABEL 3]', ['TEST2']),
}, { 'in int (items/copy_number) | one match':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
), 'copyNumber[in]=[12,34,91]', ['TEST2']),
}, { 'in int (items/copy_number) | multiple matches':
('items', (
('TEST1', {'copy_number': 54}),
('TEST2', {'copy_number': 12}),
), 'copyNumber[in]=[12,34,54]', ['TEST1', 'TEST2']),
}, { 'in date (items/due_date) | one match':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[in]=[2018-11-30T05:00:00Z,2019-01-30T05:00:00Z]',
['TEST1']),
}, { 'in date (items/due_date) | multiple matches':
('items', (
('TEST1', {'due_date': datetime(2018, 11, 30, 5, 0, 0,
tzinfo=utc)}),
('TEST2', {'due_date': datetime(2018, 12, 13, 9, 0, 0,
tzinfo=utc)}),
), 'dueDate[in]=[2018-11-30T05:00:00Z,2018-12-13T09:00:00Z]',
['TEST1', 'TEST2']),
},
# IS NULL (`isnull`) always takes a boolean value as the query
# argument. If false, returns records where the queried field
# exists; if true, returns records where the queried field does not
# exist. Note: behavior doesn't change based on field type, so just
# testing one type of field is sufficient.
{ 'isnull text (bibs/creator) | true: one match':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': None}),
), 'creator[isnull]=true', ['TEST3'])
}, { 'isnull text (bibs/creator) | false: multiple matches':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': None}),
), 'creator[isnull]=false', ['TEST1', 'TEST2'])
}, { 'isnull text (bibs/creator) | true: no matches':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': 'Person, Test C'}),
), 'creator[isnull]=true', None)
}, { 'isnull text (bibs/creator) | false: no matches':
('bibs', (
('TEST1', {'creator': None}),
('TEST2', {'creator': None}),
('TEST3', {'creator': None}),
), 'creator[isnull]=false', None)
}, { 'isnull text (bibs/creator) | negated':
('bibs', (
('TEST1', {'creator': 'Person, Test A'}),
('TEST2', {'creator': 'Person, Test B'}),
('TEST3', {'creator': None}),
), 'creator[-isnull]=true', ['TEST1', 'TEST2'])
},
# SEARCH / SEARCHTYPE: The `search` argument combined with a valid
# `searchtype` conducts a full-text-style search of the targeted
# resource. It's similar to the `keywords` operator in that it
# passes your search query to Solr as a keyword query, but it
# searches multiple fields at once (rather than just one field).
# The `searchtype` argument corresponds with a set of fields,
# weights, etc. defined in api.filters.HaystackFilter that are
# passed to Solr along with the search query, for relevance
# ranking.
#
# At the moment, 'journals' and 'databases' are the two valid
# searchtypes. These were made specifically for the Bento Box
# search. The tests below use query strings like the Bento Box API
# uses.
{ 'searchtype journals | full_title match':
('bibs', (
('TEST1', {'full_title': 'Online Journal of Medicine',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST2', {'full_title': 'Journal of Medicine in Print',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST3', {'full_title': 'Puppies Today',
'alternate_titles': ['Puppies'],
'creator': 'Person, Test B.',
'full_subjects': ['Puppers', 'Doge'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST4', {'full_title': 'Texas Journal of Open Heart Surgery',
'alternate_titles': ['TJOHS'],
'creator': 'Person, Test C.',
'full_subjects': ['Hearts', 'Medicine'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST5', {'full_title': 'Book about Medicine',
'creator': 'Person, Test D.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'BOOKS',
'suppressed': False}),
('TEST6', {'full_title': 'Out-of-Print Journal of Medicine',
'creator': 'Person, Test A.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'JOURNALS',
'suppressed': True}),
), ('search="journal of medicine"'
'&searchtype=journals&suppressed=false&materialType[in]=[JOURNAL,'
'JOURNALS,EJOURNAL,EJOURNALS]'),
['TEST1', 'TEST2'])
}, { 'searchtype journals | full_subjects match':
('bibs', (
('TEST1', {'full_title': 'Online Journal of Medicine',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST2', {'full_title': 'Journal of Medicine in Print',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST3', {'full_title': 'Puppies Today',
'alternate_titles': ['Puppies'],
'creator': 'Person, Test B.',
'full_subjects': ['Puppers', 'Doge'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST4', {'full_title': 'Texas Journal of Open Heart Surgery',
'alternate_titles': ['TJOHS'],
'creator': 'Person, Test C.',
'full_subjects': ['Hearts', 'Medicine'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST5', {'full_title': 'Book about Medicine',
'creator': 'Person, Test D.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'BOOKS',
'suppressed': False}),
('TEST6', {'full_title': 'Out-of-Print Journal of Medicine',
'creator': 'Person, Test A.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'JOURNALS',
'suppressed': True}),
), ('search=puppers'
'&searchtype=journals&suppressed=false&materialType[in]=[JOURNAL,'
'JOURNALS,EJOURNAL,EJOURNALS]'),
['TEST3'])
}, { 'searchtype journals | title and subjects match':
('bibs', (
('TEST1', {'full_title': 'Online Journal of Medicine',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST2', {'full_title': 'Journal of Medicine in Print',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST3', {'full_title': 'Puppies Today',
'alternate_titles': ['Puppies'],
'creator': 'Person, Test B.',
'full_subjects': ['Puppers', 'Doge'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST4', {'full_title': 'Texas Journal of Open Heart Surgery',
'alternate_titles': ['TJOHS'],
'creator': 'Person, Test C.',
'full_subjects': ['Hearts', 'Medicine'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST5', {'full_title': 'Book about Medicine',
'creator': 'Person, Test D.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'BOOKS',
'suppressed': False}),
('TEST6', {'full_title': 'Out-of-Print Journal of Medicine',
'creator': 'Person, Test A.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'JOURNALS',
'suppressed': True}),
), ('search=medicine'
'&searchtype=journals&suppressed=false&materialType[in]=[JOURNAL,'
'JOURNALS,EJOURNAL,EJOURNALS]'),
['TEST1', 'TEST2', 'TEST4'])
}, { 'searchtype journals | alternate_titles match':
('bibs', (
('TEST1', {'full_title': 'Online Journal of Medicine',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST2', {'full_title': 'Journal of Medicine in Print',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST3', {'full_title': 'Puppies Today',
'alternate_titles': ['Puppies'],
'creator': 'Person, Test B.',
'full_subjects': ['Puppers', 'Doge'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST4', {'full_title': 'Texas Journal of Open Heart Surgery',
'alternate_titles': ['TJOHS'],
'creator': 'Person, Test C.',
'full_subjects': ['Hearts', 'Medicine'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST5', {'full_title': 'Book about Medicine',
'creator': 'Person, Test D.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'BOOKS',
'suppressed': False}),
('TEST6', {'full_title': 'Out-of-Print Journal of Medicine',
'creator': 'Person, Test A.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'JOURNALS',
'suppressed': True}),
), ('search=medjournal'
'&searchtype=journals&suppressed=false&materialType[in]=[JOURNAL,'
'JOURNALS,EJOURNAL,EJOURNALS]'),
['TEST1', 'TEST2'])
}, { 'searchtype journals | wrong suppression or mat type => no match':
('bibs', (
('TEST1', {'full_title': 'Online Journal of Medicine',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST2', {'full_title': 'Journal of Medicine in Print',
'alternate_titles': ['MedJournal'],
'creator': 'Person, Test A.',
'full_subjects': ['Hearts', 'Eyeballs', 'Brains'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST3', {'full_title': 'Puppies Today',
'alternate_titles': ['Puppies'],
'creator': 'Person, Test B.',
'full_subjects': ['Puppers', 'Doge'],
'material_type': 'JOURNALS',
'suppressed': False}),
('TEST4', {'full_title': 'Texas Journal of Open Heart Surgery',
'alternate_titles': ['TJOHS'],
'creator': 'Person, Test C.',
'full_subjects': ['Hearts', 'Medicine'],
'material_type': 'EJOURNALS',
'suppressed': False}),
('TEST5', {'full_title': 'Book about Medicine',
'creator': 'Person, Test D.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'BOOKS',
'suppressed': False}),
('TEST6', {'full_title': 'Out-of-Print Journal of Medicine',
'creator': 'Person, Test A.',
'full_subjects': ['Medicine', 'Medical stuff'],
'material_type': 'JOURNALS',
'suppressed': True}),
), ('search="medical stuff"'
'&searchtype=journals&suppressed=false&materialType[in]=[JOURNAL,'
'JOURNALS,EJOURNAL,EJOURNALS]'),
None)
}, { 'searchtype databases | title match':
('eresources', (
('TEST1', {'title': 'Medical Database',
'alternate_titles': ['MedDB'],
'subjects': ['Hearts', 'Brains', 'Medicine'],
'summary': 'This is a database about medical stuff.',
'holdings': ['Online Journal of Medicine',
'Texas Journal of Open Heart Surgery'],
'suppressed': False}),
('TEST2', {'title': 'General Database',
'alternate_titles': ['EBSCO'],
'subjects': ['Nerds', 'Sweater vests', 'Studying'],
'summary': 'Resources for all your academic needs.',
'holdings': ['English Literature Today',
'Math Today',
'History Yesterday',
'Neuroscience Today',
'Psychology Today',
'Ascots Today'],
'suppressed': False}),
('TEST3', {'title': 'English Database',
'alternate_titles': ['Tallyho', 'Bobs your uncle'],
'subjects': ['Tea', 'Football', 'The Queen'],
'summary': 'Resources for Englishmen.',
'holdings': ['English Literature Today',
'Shakespeare'],
'suppressed': False}),
), ('search="medical database"'
'&searchtype=databases&suppressed=false'),
['TEST1'])
}, { 'searchtype databases | alternate_titles match':
('eresources', (
('TEST1', {'title': 'Medical Database',
'alternate_titles': ['MedDB'],
'subjects': ['Hearts', 'Brains', 'Medicine'],
'summary': 'This is a database about medical stuff.',
'holdings': ['Online Journal of Medicine',
'Texas Journal of Open Heart Surgery'],
'suppressed': False}),
('TEST2', {'title': 'General Database',
'alternate_titles': ['EBSCO'],
'subjects': ['Nerds', 'Sweater vests', 'Studying'],
'summary': 'Resources for all your academic needs.',
'holdings': ['English Literature Today',
'Math Today',
'History Yesterday',
'Neuroscience Today',
'Psychology Today',
'Ascots Today'],
'suppressed': False}),
('TEST3', {'title': 'English Database',
'alternate_titles': ['Tallyho', 'Bobs your uncle'],
'subjects': ['Tea', 'Football', 'The Queen'],
'summary': 'Resources for Englishmen.',
'holdings': ['English Literature Today',
'Shakespeare'],
'suppressed': False}),
), ('search=EBSCO'
'&searchtype=databases&suppressed=false'),
['TEST2'])
}, { 'searchtype databases | holdings match':
('eresources', (
('TEST1', {'title': 'Medical Database',
'alternate_titles': ['MedDB'],
'subjects': ['Hearts', 'Brains', 'Medicine'],
'summary': 'This is a database about medical stuff.',
'holdings': ['Online Journal of Medicine',
'Texas Journal of Open Heart Surgery'],
'suppressed': False}),
('TEST2', {'title': 'General Database',
'alternate_titles': ['EBSCO'],
'subjects': ['Nerds', 'Sweater vests', 'Studying'],
'summary': 'Resources for all your academic needs.',
'holdings': ['English Literature Today',
'Math Today',
'History Yesterday',
'Neuroscience Today',
'Psychology Today',
'Ascots Today'],
'suppressed': False}),
('TEST3', {'title': 'English Database',
'alternate_titles': ['Tallyho', 'Bobs your uncle'],
'subjects': ['Tea', 'Football', 'The Queen'],
'summary': 'Resources for Englishmen.',
'holdings': ['English Literature Today',
'Shakespeare'],
'suppressed': False}),
), ('search=English'
'&searchtype=databases&suppressed=false'),
['TEST2', 'TEST3'])
}, { 'searchtype databases | subjects match':
('eresources', (
('TEST1', {'title': 'Medical Database',
'alternate_titles': ['MedDB'],
'subjects': ['Hearts', 'Brains', 'Medicine'],
'summary': 'This is a database about medical stuff.',
'holdings': ['Online Journal of Medicine',
'Texas Journal of Open Heart Surgery'],
'suppressed': False}),
('TEST2', {'title': 'General Database',
'alternate_titles': ['EBSCO'],
'subjects': ['Nerds', 'Sweater vests', 'Studying'],
'summary': 'Resources for all your academic needs.',
'holdings': ['English Literature Today',
'Math Today',
'History Yesterday',
'Neuroscience Today',
'Psychology Today',
'Ascots Today'],
'suppressed': False}),
('TEST3', {'title': 'English Database',
'alternate_titles': ['Tallyho', 'Bobs your uncle'],
'subjects': ['Tea', 'Football', 'The Queen'],
'summary': 'Resources for Englishmen.',
'holdings': ['English Literature Today',
'Shakespeare'],
'suppressed': False}),
), ('search=tea'
'&searchtype=databases&suppressed=false'),
['TEST3'])
}, { 'searchtype databases | summary match':
('eresources', (
('TEST1', {'title': 'Medical Database',
'alternate_titles': ['MedDB'],
'subjects': ['Hearts', 'Brains', 'Medicine'],
'summary': 'This is a database about medical stuff.',
'holdings': ['Online Journal of Medicine',
'Texas Journal of Open Heart Surgery'],
'suppressed': False}),
('TEST2', {'title': 'General Database',
'alternate_titles': ['EBSCO'],
'subjects': ['Nerds', 'Sweater vests', 'Studying'],
'summary': 'Resources for all your academic needs.',
'holdings': ['English Literature Today',
'Math Today',
'History Yesterday',
'Neuroscience Today',
'Psychology Today',
'Ascots Today'],
'suppressed': False}),
('TEST3', {'title': 'English Database',
'alternate_titles': ['Tallyho', 'Bobs your uncle'],
'subjects': ['Tea', 'Football', 'The Queen'],
'summary': 'Resources for Englishmen.',
'holdings': ['English Literature Today',
'Shakespeare'],
'suppressed': False}),
), ('search=academic'
'&searchtype=databases&suppressed=false'),
['TEST2'])
},
# MULTIPLE ARGUMENTS: Queries that use multiple arguments should
# effectively "AND" them together, returning a set of records where
# all queried fields match all query parameters.
{ 'multi-arg | multiple criteria against the same field':
('bibs', (
('TEST1', {'creator': 'Person, Test 1900-1950'}),
('TEST2', {'creator': 'Person, Test 1940-2010'}),
('TEST3', {'creator': 'Person, Test 1970-'}),
), 'creator[contains]=Person&creator[contains]=1970', ['TEST3'])
}, { 'multi-arg | multiple criteria against a multi-valued field':
('bibs', (
('TEST1', {'sudoc_numbers': ['A 1', 'A 2', 'A 3']}),
('TEST2', {'sudoc_numbers': ['B 1', 'B 2']}),
('TEST3', {'sudoc_numbers': ['A 4', 'B 3']}),
), 'sudocNumbers[startswith]=A&sudocNumbers[startswith]=B', ['TEST3'])
}, { 'multi-arg | multiple criteria against different fields':
('bibs', (
('TEST1', {'creator': 'Person, Test', 'suppressed': True}),
('TEST2', {'creator': 'Person, Test', 'suppressed': False}),
('TEST3', {'creator': 'Person, Test', 'suppressed': False}),
), 'creator=Person, Test&suppressed=false', ['TEST2', 'TEST3'])
}, { 'multi-arg | kw query with multiple criteria':
('bibs', (
('TEST1', {'creator': 'Person, Test', 'suppressed': True}),
('TEST2', {'creator': 'Person, Joe', 'suppressed': False}),
('TEST3', {'creator': 'Person, Test', 'suppressed': False}),
), 'creator[keywords]=person OR test&suppressed=false',
['TEST2', 'TEST3'])
}, { 'multi-arg | multiple criteria with negation':
('bibs', (
('TEST1', {'creator': 'Person, Test 1900-1950'}),
('TEST2', {'creator': 'Person, Test 1940-2010'}),
('TEST3', {'creator': 'Person, Test 1970-'}),
), 'creator[contains]=Person&creator[-contains]=1970',
['TEST1', 'TEST2'])
}, { 'multi-arg | kw query with multiple criteria and negation':
('bibs', (
('TEST1', {'creator': 'Smith, Test', 'suppressed': True}),
('TEST2', {'creator': 'Smith, Joe', 'suppressed': False}),
('TEST3', {'creator': 'Person, Sally', 'suppressed': False}),
), 'creator[-keywords]=person OR test&suppressed=false', ['TEST2'])
},
)
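# Illustrative sketch (not part of the test suite): how a client might compose
# the operator-style query strings exercised above. The helper below is
# hypothetical -- the API itself only sees the raw query string, e.g.
# 'creator[isnull]=true' or 'search=medicine&searchtype=journals'.
def _example_build_query_string(criteria):
    """`criteria` is a sequence of (field, operator, value) tuples; pass
    None as the operator for a plain equality filter."""
    parts = []
    for field, op, value in criteria:
        key = '{}[{}]'.format(field, op) if op else field
        parts.append('{}={}'.format(key, value))
    return '&'.join(parts)
# _example_build_query_string([('search', None, 'medicine'),
#                              ('searchtype', None, 'journals'),
#                              ('suppressed', None, 'false')])
# => 'search=medicine&searchtype=journals&suppressed=false'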
# PARAMETERS__FILTER_TESTS__STRANGE: Parameters for testing API filter
# behavior that either is unintentional, is counter to what you'd
# expect, or is ambiguous or undefined in some way (such as using
# operators with fields they weren't designed to be used with). This
# set of test parameters documents the known strange behavior. Some of
# it is legitimately buggy and we should go back and fix it later; some
# of it we may need to add validation for so we can alert the client.
# Or, it may be sufficient just to document it here.
PARAMETERS__FILTER_TESTS__STRANGE = (
'resource, test_data, search, expected',
{ 'TO_FIX: exact text (bibs/creator) | matches keywords or phrases':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=Test A.', ['TEST1'])
}, { 'TO_FIX: exact text (bibs/creator) | case does not have to match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=person, test a. 1900-', ['TEST1'])
}, { 'TO_FIX: exact text (bibs/creator) | punct. does not have to match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[exact]=person test a 1900', ['TEST1'])
}, { 'FYI: exact string (items/call_number) | CN normalization':
# Call numbers are strings, but, unlike other strings, they are
# normalized before matching, since, e.g., MT 100 == mt 100 ==
# mt100 == MT-100.
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 100.1 .G322 2001'}),
), 'callNumber[exact]=mt100.1 c35 1995', ['TEST1']),
}, { 'TO_FIX: contains text (bibs/creator) | multiple words: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[contains]=Test A. 1900-', None),
}, { 'TO_FIX: contains text (bibs/creator) | punctuation: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[contains]=A.', None),
}, { 'FYI: contains string (items/call_number) | CN normalization':
# Call numbers are strings, but, unlike other strings, they are
# normalized before matching, since, e.g., MT 100 == mt 100 ==
# mt100 == MT-100.
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 100.1 .G322 2001'}),
), 'callNumber[contains]=100.1 c35', ['TEST1']),
}, { 'UNSURE: contains int (items/copy_number) | no match, ever':
('items', (
('TEST32', {'copy_number': 32}),
('TEST320', {'copy_number': 320}),
('TEST3', {'copy_number': 3}),
('TEST2', {'copy_number': 2}),
('TEST321', {'copy_number': 321}),
('TEST392', {'copy_number': 392}),
('TEST932', {'copy_number': 932}),
('TEST3092', {'copy_number': 3092}),
), 'copyNumber[contains]=32', None),
}, { 'TO_FIX: startswith text (bibs/creator) | matches start of any word':
('bibs', (
('TEST1', {'creator': 'Per Test A. 1900-'}),
('TEST2', {'creator': 'Person Test B. 1900-'}),
), 'creator[startswith]=Tes', ['TEST1', 'TEST2']),
}, { 'TO_FIX: startswith text (bibs/creator) | multiple words: no match':
('bibs', (
('TEST1', {'creator': 'Per Test A. 1900-'}),
('TEST2', {'creator': 'Person Test B. 1900-'}),
), 'creator[startswith]=Person Test', None),
}, { 'TO_FIX: startswith text (bibs/creator) | punctuation: no match':
('bibs', (
('TEST1', {'creator': 'Per, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[startswith]=Person,', None),
}, { 'FYI: startswith string (items/call_number) | CN normalization':
# Call numbers are strings, but, unlike other strings, they are
# normalized before matching, since, e.g., MT 100 == mt 100 ==
# mt100 == MT-100.
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 100.1 .G322 2001'}),
), 'callNumber[startswith]=MT100', ['TEST1', 'TEST2']),
}, { 'UNSURE: startswith int (items/copy_number) | no match, ever':
('items', (
('TEST32', {'copy_number': 32}),
('TEST320', {'copy_number': 320}),
('TEST3', {'copy_number': 3}),
('TEST2', {'copy_number': 2}),
('TEST321', {'copy_number': 321}),
('TEST392', {'copy_number': 392}),
('TEST932', {'copy_number': 932}),
('TEST3092', {'copy_number': 3092}),
), 'copyNumber[startswith]=3', None),
}, { 'TO_FIX: endswith text (bibs/creator) | matches end of any word':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[endswith]=est', ['TEST1', 'TEST2']),
}, { 'TO_FIX: endswith text (bibs/creator) | multiple words: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test Alpha'}),
('TEST2', {'creator': 'Person, Test Beta'}),
), 'creator[endswith]=Test Alpha', None),
}, { 'TO_FIX: endswith text (bibs/creator) | punctuation: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[endswith]=1900-', None),
}, { 'FYI: endswith string (items/call_number) | CN normalization':
# Call numbers are strings, but, unlike other strings, they are
# normalized before matching, since, e.g., MT 100 == mt 100 ==
# mt100 == MT-100.
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 100.1 .G322 2001'}),
), 'callNumber[endswith]=100.1 c35 1995', ['TEST1']),
}, { 'UNSURE: endswith int (items/copy_number) | no match, ever':
('items', (
('TEST32', {'copy_number': 32}),
('TEST320', {'copy_number': 320}),
('TEST3', {'copy_number': 3}),
('TEST2', {'copy_number': 2}),
('TEST321', {'copy_number': 321}),
('TEST392', {'copy_number': 392}),
('TEST932', {'copy_number': 932}),
('TEST3092', {'copy_number': 3092}),
), 'copyNumber[endswith]=2', None),
}, { 'TO_FIX: matches text (bibs/creator) | ^ matches start of word':
('bibs', (
('TEST1', {'creator': 'Smith, Sonia'}),
('TEST2', {'creator': 'Person, Test'}),
), 'creator[matches]=^[Ss]on', ['TEST1']),
}, { 'TO_FIX: matches text (bibs/creator) | $ matches end of word':
('bibs', (
('TEST1', {'creator': 'Smith, Sonia'}),
('TEST2', {'creator': 'Person, Test'}),
), 'creator[matches]=[Ss]on$', ['TEST2']),
}, { 'TO_FIX: matches text (bibs/creator) | cannot match across >1 words':
('bibs', (
('TEST1', {'creator': 'Test A Person'}),
('TEST2', {'creator': 'Test B Person'}),
), 'creator[matches]=Test [AB] Person', None),
}, { 'TO_FIX: matches text (bibs/creator) | punctuation: no match':
('bibs', (
('TEST1', {'creator': 'Person, Test A. 1900-'}),
('TEST2', {'creator': 'Person, Test B. 1900-'}),
), 'creator[matches]=Person,', None),
}, { 'FYI: matches string (items/call_number) | CN normalization':
# Call numbers are strings, but, unlike other strings, they are
# normalized before matching, since, e.g., MT 100 == mt 100 ==
# mt100 == MT-100.
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 100.1 .G322 2001'}),
), 'callNumber[matches]=^mt100', ['TEST1', 'TEST2']),
}, { 'UNSURE: matches int (items/copy_number) | no match, ever':
('items', (
('TEST1', {'copy_number': 32}),
('TEST2', {'copy_number': 320}),
), 'copyNumber[matches]=^3', None),
}, { 'TO_FIX: gt/gte/lt/lte string (items/call_number) | CN normalization':
        # Call number normalization for searching is useless for
        # gt/gte/lt/lte/range comparisons, but that is what is currently
        # used: a call_number[gt]=mt100 filter will match both "MT 20"
        # and "MT 1 .B82", because the search normalization removes
        # spaces and punctuation (MT 20 ==> MT20 and MT 1 .B82 ==>
        # MT1B82). We SHOULD use call number sort normalization for
        # these operators. (An illustrative sketch of the search-style
        # normalization follows just after this parameter block.)
('items', (
('TEST1', {'call_number': 'MT 100.1 .C35 1995'}),
('TEST2', {'call_number': 'MT 20'}),
('TEST3', {'call_number': 'MT 1 .B82'}),
), 'callNumber[gt]=mt100', ['TEST1', 'TEST2', 'TEST3']),
},
)
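# Illustrative sketch (not part of the test suite) of the search-style call
# number normalization described in the comments above: lowercase everything
# and strip spaces/punctuation, so 'MT 100', 'mt 100', 'mt100', and 'MT-100'
# all normalize to 'mt100'. The helper name is hypothetical; the real
# implementation lives in the indexing/filtering code, not in this module.
import re
def _example_searchable_call_number(cn):
    return re.sub(r'[^a-z0-9]', '', cn.lower())
# _example_searchable_call_number('MT 100.1 .C35 1995') => 'mt1001c351995'
# Note how this collapses 'MT 20' to 'mt20' and 'MT 1 .B82' to 'mt1b82',
# which is why string-order comparisons (gt/gte/lt/lte) behave oddly.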
# PARAMETERS__ORDERBY_TESTS__INTENDED: Parameters for testing API
# filters that use an orderBy parameter (to define what order to return
# results in). These are similar to the
# PARAMETERS__FILTER_TESTS__INTENDED parameters, but they include an
# orderBy parameter in the search string.
PARAMETERS__ORDERBY_TESTS__INTENDED = (
'resource, test_data, search, expected',
{ 'order by int (items/copy_number) | ascending':
('items', (
('TEST11', {'volume': 'TEST', 'copy_number': 11}),
('TEST2', {'volume': 'TEST', 'copy_number': 2}),
('TEST1', {'volume': 'TEST', 'copy_number': 1}),
('TEST200', {'volume': 'TEST', 'copy_number': 200}),
('TEST10', {'volume': 'TEST', 'copy_number': 10}),
('TEST3', {'volume': 'TEST', 'copy_number': 3}),
), 'volume=TEST&orderBy=copyNumber',
['TEST1', 'TEST2', 'TEST3', 'TEST10', 'TEST11', 'TEST200']),
}, { 'order by int (items/copy_number) | descending':
('items', (
('TEST11', {'volume': 'TEST', 'copy_number': 11}),
('TEST2', {'volume': 'TEST', 'copy_number': 2}),
('TEST1', {'volume': 'TEST', 'copy_number': 1}),
('TEST200', {'volume': 'TEST', 'copy_number': 200}),
('TEST10', {'volume': 'TEST', 'copy_number': 10}),
('TEST3', {'volume': 'TEST', 'copy_number': 3}),
), 'volume=TEST&orderBy=-copyNumber',
['TEST200', 'TEST11', 'TEST10', 'TEST3', 'TEST2', 'TEST1']),
}, { 'order by string (items/barcode) | ascending':
('items', (
('TEST11', {'volume': 'TEST', 'barcode': 'A11'}),
('TEST2', {'volume': 'TEST', 'barcode': 'A2'}),
('TEST1', {'volume': 'TEST', 'barcode': 'A1'}),
('TEST200', {'volume': 'TEST', 'barcode': 'A200'}),
('TEST10', {'volume': 'TEST', 'barcode': 'A10'}),
('TEST3', {'volume': 'TEST', 'barcode': 'A3'}),
), 'volume=TEST&orderBy=barcode',
['TEST1', 'TEST10', 'TEST11', 'TEST2', 'TEST200', 'TEST3']),
}, { 'order by string (items/barcode) | descending':
('items', (
('TEST11', {'volume': 'TEST', 'barcode': 'A11'}),
('TEST2', {'volume': 'TEST', 'barcode': 'A2'}),
('TEST1', {'volume': 'TEST', 'barcode': 'A1'}),
('TEST200', {'volume': 'TEST', 'barcode': 'A200'}),
('TEST10', {'volume': 'TEST', 'barcode': 'A10'}),
('TEST3', {'volume': 'TEST', 'barcode': 'A3'}),
), 'volume=TEST&orderBy=-barcode',
['TEST3', 'TEST200', 'TEST2', 'TEST11', 'TEST10', 'TEST1']),
}, { 'order by date (items/checkout_date) | ascending':
('items', (
('TEST4', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 11, 2, 0, 0,
tzinfo=utc)}),
('TEST1', {'volume': 'TEST',
'checkout_date': datetime(2018, 2, 20, 0, 0, 0,
tzinfo=utc)}),
('TEST6', {'volume': 'TEST',
'checkout_date': datetime(2019, 1, 1, 12, 0, 0,
tzinfo=utc)}),
('TEST3', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 2, 2, 0, 0,
tzinfo=utc)}),
('TEST5', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 11, 11, 0, 0,
tzinfo=utc)}),
('TEST2', {'volume': 'TEST',
'checkout_date': datetime(2018, 2, 20, 0, 0, 1,
tzinfo=utc)}),
), 'volume=TEST&orderBy=checkoutDate',
['TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6']),
}, { 'order by date (items/checkout_date) | descending':
('items', (
('TEST4', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 11, 2, 0, 0,
tzinfo=utc)}),
('TEST1', {'volume': 'TEST',
'checkout_date': datetime(2018, 2, 20, 0, 0, 0,
tzinfo=utc)}),
('TEST6', {'volume': 'TEST',
'checkout_date': datetime(2019, 1, 1, 12, 0, 0,
tzinfo=utc)}),
('TEST3', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 2, 2, 0, 0,
tzinfo=utc)}),
('TEST5', {'volume': 'TEST',
'checkout_date': datetime(2018, 10, 11, 11, 0, 0,
tzinfo=utc)}),
('TEST2', {'volume': 'TEST',
'checkout_date': datetime(2018, 2, 20, 0, 0, 1,
tzinfo=utc)}),
), 'volume=TEST&orderBy=-checkoutDate',
['TEST6', 'TEST5', 'TEST4', 'TEST3', 'TEST2', 'TEST1']),
}, { 'order by multiple | string asc, int asc':
('items', (
('TEST5', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST6', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST2', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST3', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST4', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=barcode,copyNumber',
['TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6']),
}, { 'order by multiple | string desc, int desc':
('items', (
('TEST5', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST6', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST2', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST3', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST4', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=-barcode,-copyNumber',
['TEST6', 'TEST5', 'TEST4', 'TEST3', 'TEST2', 'TEST1']),
}, { 'order by multiple | int asc, string asc':
('items', (
('TEST3', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST5', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST4', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST6', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST2', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=copyNumber,barcode',
['TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6']),
}, { 'order by multiple | int desc, string desc':
('items', (
('TEST3', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST5', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST4', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST6', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST2', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=-copyNumber,-barcode',
['TEST6', 'TEST5', 'TEST4', 'TEST3', 'TEST2', 'TEST1']),
}, { 'order by multiple | int asc, string desc':
('items', (
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST4', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST5', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST3', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST6', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST2', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=copyNumber,-barcode',
['TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6']),
}, { 'order by multiple | int desc, string asc':
('items', (
('TEST1', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'B'}),
('TEST4', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'B'}),
('TEST5', {'volume': 'TEST', 'copy_number': 2, 'barcode': 'A'}),
('TEST3', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'A'}),
('TEST6', {'volume': 'TEST', 'copy_number': 10, 'barcode': 'A'}),
('TEST2', {'volume': 'TEST', 'copy_number': 1, 'barcode': 'AA'}),
), 'volume=TEST&orderBy=-copyNumber,barcode',
['TEST6', 'TEST5', 'TEST4', 'TEST3', 'TEST2', 'TEST1']),
},
)
# PARAMETERS__ORDERBY_TESTS__STRANGE: Parameters for testing API
# filters that use an orderBy parameter and don't quite behave as you
# might expect. These are similar to the
# PARAMETERS__FILTER_TESTS__STRANGE parameters, but they include an
# orderBy parameter in the search string.
PARAMETERS__ORDERBY_TESTS__STRANGE = (
'resource, test_data, search, expected',
# Order by TEXT fields: Currently we don't actually allow ordering
# by any text fields (hasn't been needed). If we ever enable that,
# we should add `strange` tests here to capture the odd ordering
# behavior, and then work to fix it, if it's still broken at that
# point.
# Order by CALL NUMBERS: Sorting items by call number is core
# functionality. So why am I putting it in STRANGE? Most fields
# actually sort on the field in the `orderBy` parameter. But for
# call numbers, if a request contains 'orderBy=callNumber', it
# uses the `call_number_sort` field instead, automatically. Which
# ... maybe that would be okay if `callNumberSort` weren't a field
# that the API exposes (which it is)! To make things even stranger,
# 'orderBy=callNumberSort' doesn't work, because it's not a field
# that's enabled for orderBy. So--it's a case where the API tries
# to be smarter than the API consumer, but the behavior isn't
        # consistent with how other fields behave, so it may be confusing.
        # (A small sketch of this orderBy-to-sort-field mapping follows
        # just after this parameter block.)
{ 'order by call number (items/call_number) | ascending':
('items', (
('TEST3', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100 .G322 2001'}),
('TEST6', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 120 .G322 2001'}),
('TEST1', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MS 100 .C35 1995'}),
('TEST5', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100.1 .A2 1999'}),
('TEST2', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 20 .B5 2016'}),
('TEST4', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100.1 .A12 1999'}),
), 'volume=TEST&orderBy=callNumber',
['TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6']),
}, { 'order by call number (items/call_number) | descending':
('items', (
('TEST3', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100 .G322 2001'}),
('TEST6', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 120 .G322 2001'}),
('TEST1', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MS 100 .C35 1995'}),
('TEST5', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100.1 .A2 1999'}),
('TEST2', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 20 .B5 2016'}),
('TEST4', {'volume': 'TEST', 'call_number_type': 'lc',
'call_number': 'MT 100.1 .A12 1999'}),
), 'volume=TEST&orderBy=-callNumber',
['TEST6', 'TEST5', 'TEST4', 'TEST3', 'TEST2', 'TEST1']),
},
)
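# Illustrative sketch (not part of the test suite) of the orderBy behavior
# described above: the client-facing 'callNumber' field is silently swapped
# for the sortable 'call_number_sort' field when building the Solr sort
# clause. The mapping dict and field names below are hypothetical stand-ins
# for what the real view/filter code does.
_EXAMPLE_ORDERBY_FIELD_MAP = {'callNumber': 'call_number_sort'}
def _example_solr_sort_clause(order_by):
    clauses = []
    for spec in order_by.split(','):
        direction = 'desc' if spec.startswith('-') else 'asc'
        field = spec.lstrip('-')
        field = _EXAMPLE_ORDERBY_FIELD_MAP.get(field, field)
        clauses.append('{} {}'.format(field, direction))
    return ', '.join(clauses)
# _example_solr_sort_clause('-callNumber') => 'call_number_sort desc'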
# TESTDATA__FIRSTITEMPERLOCATION: We use a consistent set of test data
# for testing the firstitemperlocation resource.
TESTDATA__FIRSTITEMPERLOCATION = (
( 'atest1',
{ 'location_code': 'atest',
'barcode': '1',
'call_number': 'BB 1234 C35 1990',
'call_number_type': 'lc' } ),
( 'atest2',
{ 'location_code': 'atest',
'barcode': '2',
'call_number': 'BB 1234 A22 2000',
'call_number_type': 'lc' } ),
( 'atest3',
{ 'location_code': 'atest',
'barcode': '3',
'call_number': 'BC 2345 F80',
'call_number_type': 'lc' } ),
( 'atest4',
{ 'location_code': 'atest',
'barcode': '4',
'call_number': 'BB 1234',
'call_number_type': 'sudoc' } ),
( 'btest1',
{ 'location_code': 'btest',
'barcode': '3',
'call_number': 'BB 1234 D99',
'call_number_type': 'lc' } ),
( 'btest2',
{ 'location_code': 'btest',
'barcode': '4',
'call_number': 'BB 1234 A22',
'call_number_type': 'sudoc' } ),
( 'btest3',
{ 'location_code': 'btest',
'barcode': '5',
'call_number': 'CC 9876 H43',
'call_number_type': 'lc' } ),
( 'btest4',
{ 'location_code': 'btest',
'barcode': '6',
'call_number': 'BB 1234',
'call_number_type': 'sudoc' } ),
( 'ctest1',
{ 'location_code': 'ctest',
'barcode': '8',
'call_number': 'BB 1234 D99 2016',
'call_number_type': 'lc' } ),
( 'ctest2',
{ 'location_code': 'ctest',
'barcode': '9',
'call_number': 'CC 1234 A22',
'call_number_type': 'other' } ),
( 'ctest3',
{ 'location_code': 'ctest',
'barcode': '10',
'call_number': '900.1 H43',
'call_number_type': 'dewey' } ),
( 'ctest4',
{ 'location_code': 'ctest',
'barcode': '11',
'call_number': 'AB 1234',
'call_number_type': 'other' } ),
)
PARAMETERS__FIRSTITEMPERLOCATION = (
        'test_data, search, expected',
{ 'LC call number type | A match at each location':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=BB 12&callNumberType=lc',
['atest2', 'btest1', 'ctest1']),
}, { 'LC call number type | A match at one location':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=BC&callNumberType=lc',
['atest3']),
}, { 'LC call number type | No matches':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=D&callNumberType=lc',
None),
}, { 'SUDOC call number type | A match at two locations':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=BB&callNumberType=sudoc',
['atest4', 'btest4']),
}, { 'DEWEY call number type | A match at one location':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=900&callNumberType=dewey',
['ctest3']),
}, { 'OTHER call number type | A match at one location':
(TESTDATA__FIRSTITEMPERLOCATION,
'callNumber[startswith]=C&callNumberType=other',
['ctest2']),
}, { 'BARCODE | A match at two locations':
(TESTDATA__FIRSTITEMPERLOCATION,
'barcode=3',
['atest3', 'btest1']),
},
)
# TESTDATA__CALLNUMBERMATCHES: We use a consistent set of test data for
# testing the callnumbermatches resource.
TESTDATA__CALLNUMBERMATCHES = (
( 'atest1',
{ 'location_code': 'atest',
'call_number': 'ZZZ 1005',
'call_number_type': 'lc' } ),
( 'atest2',
{ 'location_code': 'atest',
'call_number': 'ZZZ 1000',
'call_number_type': 'lc' } ),
( 'atest3',
{ 'location_code': 'atest',
'call_number': 'ZZZ 1001',
'call_number_type': 'lc' } ),
( 'btest1',
{ 'location_code': 'btest',
'call_number': 'ZZZ 1003',
'call_number_type': 'lc' } ),
( 'btest2',
{ 'location_code': 'btest',
'call_number': 'ZZZ 1002',
'call_number_type': 'lc' } ),
( 'btest3',
{ 'location_code': 'btest',
'call_number': 'ZZZ 1004',
'call_number_type': 'lc' } ),
( 'ctest1',
{ 'location_code': 'ctest',
'call_number': 'ZZZ 1.3',
'call_number_type': 'sudoc' } ),
( 'ctest2',
{ 'location_code': 'ctest',
'call_number': 'ZZZ 1.2',
'call_number_type': 'sudoc' } ),
( 'ctest3',
{ 'location_code': 'ctest',
'call_number': 'ZZZ 1.1',
'call_number_type': 'sudoc' } ),
)
PARAMETERS__CALLNUMBERMATCHES = (
        'test_data, search, expected',
{ 'Match all locations, all CN types':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ',
['ZZZ 1.1', 'ZZZ 1.2', 'ZZZ 1.3', 'ZZZ 1000', 'ZZZ 1001', 'ZZZ 1002',
'ZZZ 1003', 'ZZZ 1004', 'ZZZ 1005']),
}, { 'Match all locations, all CN types, with limit':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&limit=4',
['ZZZ 1.1', 'ZZZ 1.2', 'ZZZ 1.3', 'ZZZ 1000']),
}, { 'Match all locations, LC type':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&callNumberType=lc',
['ZZZ 1000', 'ZZZ 1001', 'ZZZ 1002', 'ZZZ 1003', 'ZZZ 1004',
'ZZZ 1005']),
}, { 'Match all locations, SUDOC type':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&callNumberType=sudoc',
['ZZZ 1.1', 'ZZZ 1.2', 'ZZZ 1.3']),
}, { 'Match location atest, all CN types':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&locationCode=atest',
['ZZZ 1000', 'ZZZ 1001', 'ZZZ 1005']),
}, { 'Match location btest, all CN types':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&locationCode=btest',
['ZZZ 1002', 'ZZZ 1003', 'ZZZ 1004']),
}, { 'Match location ctest, all CN types':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&locationCode=ctest',
['ZZZ 1.1', 'ZZZ 1.2', 'ZZZ 1.3']),
}, { 'Match location atest, LC type':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&callNumberType=lc&locationCode=atest',
['ZZZ 1000', 'ZZZ 1001', 'ZZZ 1005']),
}, { 'Match location ctest, LC type':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ&callNumberType=lc&locationCode=ctest',
[]),
}, { 'Match one call number':
(TESTDATA__CALLNUMBERMATCHES,
'callNumber[startswith]=ZZZ1001',
['ZZZ 1001']),
},
)
# HELPER FUNCTIONS for compiling test data into pytest parameters
def compile_resource_links(resources):
"""
    Return a list of (resource, links) tuples for RESOURCE_METADATA (or
    similar) entries that have a `links` element, for test parametrization.
"""
return [(k, v['links']) for k, v in resources.items()
if v.get('links', None)]
def compile_params(parameters):
"""
Compile a tuple of test parameters for pytest.parametrize, from one
of the above PARAMETERS__* constants.
"""
return tuple(p.values()[0] for p in parameters[1:])
def compile_ids(parameters):
"""
Compile a tuple of test IDs for pytest.parametrize, from one of the
above PARAMETERS__* constants.
"""
return tuple(p.keys()[0] for p in parameters[1:])
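# For example, the PARAMETERS__* constants above are shaped so that the first
# element is the argument-name string and each remaining element is a one-key
# dict mapping a test ID to its parameter tuple. A parametrized test is
# therefore wired up roughly like this (hedged sketch; the real tests below
# follow this same pattern):
#
#     @pytest.mark.parametrize(PARAMETERS__CALLNUMBERMATCHES[0],
#                              compile_params(PARAMETERS__CALLNUMBERMATCHES),
#                              ids=compile_ids(PARAMETERS__CALLNUMBERMATCHES))
#     def test_something(test_data, search, expected):
#         ...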
# PYTEST FIXTURES
@pytest.fixture
def api_settings(settings):
"""
Pytest fixture that sets a few default Django settings for the API
tests in this module. Returns the `settings` object. Doing setup
like this here via a fixture seems slightly better than putting
this in the `test` settings module--the relevant settings are
closer to the tests that use them. Just have to make sure to
include this fixture in all of the tests that need them.
"""
settings.REST_FRAMEWORK['PAGINATE_BY_PARAM'] = 'limit'
settings.REST_FRAMEWORK['PAGINATE_PARAM'] = 'offset'
settings.REST_FRAMEWORK['SEARCH_PARAM'] = 'search'
settings.REST_FRAMEWORK['SEARCHTYPE_PARAM'] = 'searchtype'
settings.REST_FRAMEWORK['MAX_PAGINATE_BY'] = 500
settings.REST_FRAMEWORK['PAGINATE_BY'] = 500
return settings
@pytest.fixture(scope='function')
def assemble_api_test_records(assemble_test_records, api_solr_env,
basic_solr_assembler):
"""
Pytest fixture. Returns a helper function that assembles & loads a
set of test records (for one test) into the api_solr_env test-data
environment fixture.
Required args include a `profile` string, the name of the unique
`id_field` for each record (for test_data record uniqueness), and a
set of static `test_data` partial records. Returns a tuple of
default solr_env records and the new test records that were loaded
from the provided test data. len(env_recs) + len(test_recs) should
equal the total number of Solr records for that profile.
"""
def _assemble_api_test_records(profile, id_field, test_data):
return assemble_test_records(profile, id_field, test_data,
env=api_solr_env,
assembler=basic_solr_assembler)
return _assemble_api_test_records
# TESTS
# ---------------------------------------------------------------------
@pytest.mark.django_db
def test_apiusers_authenticated_requests(api_client,
apiuser_with_custom_defaults,
simple_sig_auth_credentials,
assert_obj_fields_match_serializer):
"""
The apiusers resource requires authentication to access; users that
can authenticate can view the apiusers list and details of a single
apiuser. Authentication must be renewed after each request.
"""
test_cls = apiuser_with_custom_defaults()
api_user = test_cls.objects.create_user('test', 'secret', password='pw',
email='[email protected]',
first_name='F', last_name='Last')
api_client.credentials(**simple_sig_auth_credentials(api_user))
list_resp = api_client.get('{}apiusers/'.format(API_ROOT))
assert list_resp.status_code == 200
api_client.credentials(**simple_sig_auth_credentials(api_user))
detail_resp = api_client.get('{}apiusers/{}'.format(API_ROOT, 'test'))
serializer = detail_resp.renderer_context['view'].get_serializer()
assert_obj_fields_match_serializer(detail_resp.data, serializer)
@pytest.mark.django_db
def test_apiusers_not_django_users(model_instance, api_client,
apiuser_with_custom_defaults,
simple_sig_auth_credentials):
"""
Django Users that don't have associated APIUsers records should
not appear in the list of apiusers.
"""
test_cls = apiuser_with_custom_defaults()
api_user = test_cls.objects.create_user('test', 'secret', password='pw',
email='[email protected]',
first_name='F', last_name='Last')
user = model_instance(User, 'bob', '[email protected]', 'bobpassword')
api_client.credentials(**simple_sig_auth_credentials(api_user))
response = api_client.get('{}apiusers/'.format(API_ROOT))
usernames = [r['username'] for r in response.data['_embedded']['apiusers']]
assert 'test' in usernames
assert 'bob' not in usernames
@pytest.mark.django_db
def test_apiusers_unauthenticated_requests_fail(api_client,
apiuser_with_custom_defaults):
"""
Requesting an apiuser list or detail view without providing any
authentication credentials should result in a 403 error.
"""
test_cls = apiuser_with_custom_defaults()
api_user = test_cls.objects.create_user('test', 'secret', password='pw',
email='[email protected]',
first_name='F', last_name='Last')
list_resp = api_client.get('{}apiusers/'.format(API_ROOT))
detail_resp = api_client.get('{}apiusers/test'.format(API_ROOT))
assert list_resp.status_code == 403
assert detail_resp.status_code == 403
@pytest.mark.django_db
def test_apiusers_wrong_username_requests_fail(api_client,
apiuser_with_custom_defaults,
simple_sig_auth_credentials):
"""
Providing an incorrect username/password pair in authentication
headers results in a 403 error.
"""
test_cls = apiuser_with_custom_defaults()
api_user1 = test_cls.objects.create_user('test', 'secret', password='pw',
email='[email protected]',
first_name='F', last_name='Last')
api_user2 = test_cls.objects.create_user('test2', 'secret', password='pw2',
email='[email protected]',
first_name='G', last_name='Last')
credentials = simple_sig_auth_credentials(api_user1)
credentials['HTTP_X_USERNAME'] = 'test2'
api_client.credentials(**credentials)
list_resp = api_client.get('{}apiusers/'.format(API_ROOT))
assert list_resp.status_code == 403
@pytest.mark.django_db
def test_apiusers_repeated_requests_fail(api_client,
apiuser_with_custom_defaults,
simple_sig_auth_credentials):
"""
Attempting to beat apiusers authentication by submitting multiple
requests without renewing credentials should result in a 403 error
on the second request.
"""
test_cls = apiuser_with_custom_defaults()
api_user = test_cls.objects.create_user('test', 'secret', password='pw',
email='[email protected]',
first_name='F', last_name='Last')
api_client.credentials(**simple_sig_auth_credentials(api_user))
resp_one = api_client.get('{}apiusers/'.format(API_ROOT))
resp_two = api_client.get('{}apiusers/'.format(API_ROOT))
assert resp_one.status_code == 200
assert resp_two.status_code == 403
@pytest.mark.parametrize('resource', RESOURCE_METADATA.keys())
def test_standard_resource(resource, api_settings, api_solr_env, api_client,
pick_reference_object_having_link,
assert_obj_fields_match_serializer):
"""
Standard resources (each with a "list" and "detail" view) should
have objects available in an "_embedded" object in the list view,
and accessing an object's "_links / self" URL should give you the
same data object. Data objects should have fields matching the
associated view serializer's `fields` attribute.
"""
list_resp = api_client.get('{}{}/'.format(API_ROOT, resource))
objects = list_resp.data['_embedded'][resource]
ref_obj = pick_reference_object_having_link(objects, 'self')
detail_resp = api_client.get(ref_obj['_links']['self']['href'])
detail_obj = detail_resp.data
assert ref_obj == detail_obj
serializer = detail_resp.renderer_context['view'].get_serializer()
assert_obj_fields_match_serializer(detail_obj, serializer)
@pytest.mark.parametrize('resource, links',
compile_resource_links(RESOURCE_METADATA))
def test_standard_resource_links(resource, links, api_settings, api_solr_env,
api_client,
pick_reference_object_having_link,
assert_obj_fields_match_serializer,
get_linked_view_and_objects):
"""
Accessing linked resources from standard resources (via _links)
should return the expected resource(s).
"""
resp = api_client.get('{}{}/'.format(API_ROOT, resource))
objects = resp.data['_embedded'][resource]
for linked_resource, field in links.items():
ref_obj = pick_reference_object_having_link(objects, field)
lview, lobjs = get_linked_view_and_objects(api_client, ref_obj, field)
assert lview.resource_name == linked_resource
assert_obj_fields_match_serializer(lobjs[0], lview.get_serializer())
revfield = RESOURCE_METADATA[linked_resource]['links'][resource]
_, rev_objs = get_linked_view_and_objects(api_client, lobjs[0],
revfield)
assert ref_obj in rev_objs
@pytest.mark.parametrize('url, err_text', [
('items/?dueDate[gt]=2018', 'datetime was formatted incorrectly'),
('items/?recordNumber[invalid]=i10000100', 'not a valid operator'),
('items/?recordNumber[in]=i10000100', 'require an array'),
('items/?recordNumber[range]=i10000100', 'require an array'),
('items/?recordNumber=[i1,i2]', 'Arrays of values are only used'),
('items/?nonExistent=0', 'not a valid field for filtering'),
('items/?orderBy=nonExistent', 'not a valid field for ordering'),
('bibs/?searchtype=nonExistent', 'searchtype parameter must be'),
('bibs/?search=none:none', 'undefined field'),
('bibs/?suppressed=not', 'expected a boolean'),
('bibs/?recordNumber[isnull]=not', 'expected a boolean'),
('items/?copyNumber[range]=[1, 2]', 'input string: " 2"'),
])
def test_request_error_badquery(url, err_text, api_solr_env, api_client,
api_settings):
"""
Requesting from the given URL should result in a 400 error response
(due to a bad query), which contains the given error text.
"""
response = api_client.get('{}{}'.format(API_ROOT, url))
assert response.status_code == 400
assert err_text in response.data['detail']
@pytest.mark.parametrize('resource, default_limit, max_limit, limit, offset, '
'exp_results, exp_start, exp_end, exp_prev_offset, '
'exp_next_offset', [
('items', 20, 50, None, None, 20, 0, 19, None, 20),
('items', 20, 50, 20, None, 20, 0, 19, None, 20),
('items', 20, 50, None, 0, 20, 0, 19, None, 20),
('items', 20, 50, 20, 0, 20, 0, 19, None, 20),
('items', 20, 50, 20, 1, 20, 1, 20, 0, 21),
('items', 20, 50, 20, 20, 20, 20, 39, 0, 40),
('items', 20, 50, 20, 40, 20, 40, 59, 20, 60),
('items', 20, 50, 25, 20, 25, 20, 44, 0, 45),
('items', 20, 50, 20, 180, 20, 180, 199, 160, None),
('items', 20, 50, 20, 190, 10, 190, 199, 170, None),
('items', 20, 50, 0, None, 0, 0, -1, None, 0),
('items', 20, 50, 50, None, 50, 0, 49, None, 50),
('items', 20, 50, 51, None, 50, 0, 49, None, 50),
('items', 20, 300, 300, None, 200, 0, 199, None, None),
('items', 20, 50, 20, 300, 0, 300, 199, 280, None),
], ids=[
'no limit or offset given => use defaults',
'limit=default, no offset given => use defaults',
'no limit given, offset=0 => use defaults',
'limit=default and offset=0 => use defaults',
'limit=20, offset=1 => 20 results, page offset by 1',
'limit=20, offset=20 => 20 results, page offset by 20',
'limit=20, offset=40 => 20 results, page offset by 40',
'limit=25, offset=20 => 25 results, page offset by 20',
'limit=20, offset=180 (total recs is 200) => 20 results, no next page',
'limit=20, offset=190 (total recs is 200) => 10 results, no next page',
'limit=0 => 0 results (STRANGE: endRow, next page)',
'limit=max => max results',
'limit > max => max results',
'limit > total => total results, no next page',
'offset > total => 0 results, no next page (STRANGE: startRow, prev page)'
])
def test_list_view_pagination(resource, default_limit, max_limit, limit,
offset, exp_results, exp_start, exp_end,
exp_prev_offset, exp_next_offset, api_settings,
api_solr_env, api_client):
"""
Requesting the given resource using the provided limit and offset
parameters should result in a data structure that we can paginate
through in predictable ways.
"""
api_settings.REST_FRAMEWORK['PAGINATE_BY'] = default_limit
api_settings.REST_FRAMEWORK['MAX_PAGINATE_BY'] = max_limit
profile = RESOURCE_METADATA[resource]['profile']
exp_total = len(api_solr_env.records[profile])
base_url = '{}{}/'.format(API_ROOT, resource)
limitq = 'limit={}'.format(limit) if limit is not None else ''
offsetq = 'offset={}'.format(offset) if offset is not None else ''
qstring = '&'.join([part for part in (limitq, offsetq) if part])
url = '?'.join([part for part in (base_url, qstring) if part])
response = api_client.get(url)
data = response.data
self_link = data['_links']['self']['href']
next_link = data['_links'].get('next', {'href': None})['href']
prev_link = data['_links'].get('previous', {'href': None})['href']
records = data.get('_embedded', {resource: []})[resource]
assert response.status_code == 200
assert len(records) == exp_results
assert data['totalCount'] == exp_total
assert data['startRow'] == exp_start
assert data['endRow'] == exp_end
assert self_link.endswith(url)
if exp_next_offset is None:
assert next_link is None
else:
assert limitq in next_link
assert 'offset={}'.format(exp_next_offset) in next_link
if exp_prev_offset is None:
assert prev_link is None
else:
assert limitq in prev_link
assert 'offset={}'.format(exp_prev_offset) in prev_link
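# Hedged sketch (not used by the tests) of the pagination arithmetic that the
# parametrization above encodes, for a collection of `total` records. It
# reproduces every row above except the endRow value in the final
# offset-beyond-total case, where the live API reports the last record's row
# (199) instead.
def _example_expected_page(total, default_limit, max_limit, limit, offset):
    eff_limit = min(default_limit if limit is None else limit, max_limit)
    start = 0 if offset is None else offset
    n_results = max(0, min(eff_limit, total - start))
    next_offset = start + eff_limit if start + eff_limit < total else None
    prev_offset = max(start - eff_limit, 0) if start > 0 else None
    return {'results': n_results, 'startRow': start,
            'endRow': start + n_results - 1,
            'prevOffset': prev_offset, 'nextOffset': next_offset}
# _example_expected_page(200, 20, 50, 20, 40)
# => {'results': 20, 'startRow': 40, 'endRow': 59,
#     'prevOffset': 20, 'nextOffset': 60}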
@pytest.mark.parametrize('resource, test_data, search, expected',
compile_params(PARAMETERS__FILTER_TESTS__INTENDED) +
compile_params(PARAMETERS__FILTER_TESTS__STRANGE),
ids=compile_ids(PARAMETERS__FILTER_TESTS__INTENDED) +
compile_ids(PARAMETERS__FILTER_TESTS__STRANGE))
def test_list_view_filters(resource, test_data, search, expected, api_settings,
assemble_api_test_records, api_client,
get_found_ids, do_filter_search):
"""
Given the provided `test_data` records: requesting the given
`resource` using the provided search filter parameters (`search`)
should return each of the records in `expected` and NONE of the
records NOT in `expected`.
"""
test_ids = set([r[0] for r in test_data])
expected_ids = set(expected) if expected is not None else set()
not_expected_ids = test_ids - expected_ids
profile = RESOURCE_METADATA[resource]['profile']
id_field = RESOURCE_METADATA[resource]['id_field']
erecs, trecs = assemble_api_test_records(profile, id_field, test_data)
# First let's do a quick sanity check to make sure the resource
# returns the correct num of records before the filter is applied.
resource_url = '{}{}/'.format(API_ROOT, resource)
check_response = api_client.get(resource_url)
assert check_response.data['totalCount'] == len(erecs) + len(trecs)
response = do_filter_search(resource_url, search, api_client)
found_ids = set(get_found_ids(id_field, response))
assert all([i in found_ids for i in expected_ids])
assert all([i not in found_ids for i in not_expected_ids])
@pytest.mark.parametrize('resource, test_data, search, expected',
compile_params(PARAMETERS__ORDERBY_TESTS__INTENDED) +
compile_params(PARAMETERS__ORDERBY_TESTS__STRANGE),
ids=compile_ids(PARAMETERS__ORDERBY_TESTS__INTENDED) +
compile_ids(PARAMETERS__ORDERBY_TESTS__STRANGE))
def test_list_view_orderby(resource, test_data, search, expected, api_settings,
assemble_api_test_records, api_client,
get_found_ids, do_filter_search):
"""
Given the provided `test_data` records: requesting the given
`resource` using the provided search filter parameters (`search`)
(which include an `orderBy` parameter), should return records in
the `expected` order.
"""
profile = RESOURCE_METADATA[resource]['profile']
id_field = RESOURCE_METADATA[resource]['id_field']
erecs, trecs = assemble_api_test_records(profile, id_field, test_data)
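    # Debugging aid: show each test record's call_number_sort value, which is
    # what the STRANGE orderBy=callNumber cases actually sort on.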
print [r.get('call_number_sort', None) for r in trecs]
resource_url = '{}{}/'.format(API_ROOT, resource)
response = do_filter_search(resource_url, search, api_client)
found_ids = get_found_ids(id_field, response)
assert found_ids == expected
@pytest.mark.parametrize('test_data, search, expected',
compile_params(PARAMETERS__FIRSTITEMPERLOCATION),
ids=compile_ids(PARAMETERS__FIRSTITEMPERLOCATION))
def test_firstitemperlocation_list(test_data, search, expected, api_settings,
assemble_api_test_records, api_client,
get_found_ids, do_filter_search):
"""
The `firstitemperlocation` resource is basically a custom filter
for `items` that submits a facet-query to Solr asking for the first
item at each location code that matches the provided call number
(plus cn type) or barcode. (Used by the Inventory App when doing a
call number or barcode lookup without providing a location.)
"""
lcodes = set([r['location_code'] for _, r in test_data])
data = {
'locations': tuple((code, {'label': code}) for code in lcodes),
'items': test_data
}
test_ids = set([r[0] for r in test_data])
expected_ids = set(expected) if expected is not None else set()
not_expected_ids = test_ids - expected_ids
for resource in data.keys():
profile = RESOURCE_METADATA[resource]['profile']
id_field = RESOURCE_METADATA[resource]['id_field']
assemble_api_test_records(profile, id_field, data[resource])
resource_url = '{}firstitemperlocation/'.format(API_ROOT)
rsp = do_filter_search(resource_url, search, api_client)
found_ids = set(get_found_ids(RESOURCE_METADATA['items']['id_field'], rsp))
assert all([i in found_ids for i in expected_ids])
assert all([i not in found_ids for i in not_expected_ids])
@pytest.mark.parametrize('test_data, search, expected',
compile_params(PARAMETERS__CALLNUMBERMATCHES),
ids=compile_ids(PARAMETERS__CALLNUMBERMATCHES))
def test_callnumbermatches_list(test_data, search, expected, api_settings,
assemble_api_test_records, api_client,
do_filter_search):
"""
The `callnumbermatches` resource simply returns an array of
    callnumber strings, in order, matching the criteria that are given.
It's used to power the callnumber autocomplete in the Inventory
App.
"""
lcodes = set([r['location_code'] for _, r in test_data])
data = {
'locations': tuple((code, {'label': code}) for code in lcodes),
'items': test_data
}
for resource in data.keys():
profile = RESOURCE_METADATA[resource]['profile']
id_field = RESOURCE_METADATA[resource]['id_field']
assemble_api_test_records(profile, id_field, data[resource])
resource_url = '{}callnumbermatches/'.format(API_ROOT)
response = do_filter_search(resource_url, search, api_client)
assert response.data == expected
| {
"content_hash": "fc7cb28113bfcb1f9203b19e88ab4508",
"timestamp": "",
"source": "github",
"line_count": 2327,
"max_line_length": 88,
"avg_line_length": 49.38332617103567,
"alnum_prop": 0.5044859243788887,
"repo_name": "unt-libraries/catalog-api",
"id": "28d4973b045d2e0a0ca3f5968d80f8cc68225b60",
"size": "114915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/sierra/api/tests/test_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7252"
},
{
"name": "CSS",
"bytes": "250"
},
{
"name": "Dockerfile",
"bytes": "1512"
},
{
"name": "HTML",
"bytes": "8099"
},
{
"name": "JavaScript",
"bytes": "598"
},
{
"name": "Makefile",
"bytes": "7425"
},
{
"name": "Python",
"bytes": "1186791"
},
{
"name": "Shell",
"bytes": "18463"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import pwndbg.decorators
import pwndbg.events
import pwndbg.gdbutils
import pwndbg.memoize
from pwndbg.color import disable_colors
from pwndbg.color import message
funcs_list_str = ', '.join(message.notice('$' + f.name) for f in pwndbg.gdbutils.functions.functions)
hint_lines = (
'loaded %i commands. Type %s for a list.' % (len(pwndbg.commands.commands), message.notice('pwndbg [filter]')),
'created %s gdb functions (can be used with print/break)' % funcs_list_str
)
for line in hint_lines:
print(message.prompt('pwndbg: ') + message.system(line))
cur = (gdb.selected_inferior(), gdb.selected_thread())
def prompt_hook(*a):
global cur
pwndbg.decorators.first_prompt = True
new = (gdb.selected_inferior(), gdb.selected_thread())
if cur != new:
pwndbg.events.after_reload(start=False)
cur = new
if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:
prompt_hook_on_stop(*a)
@pwndbg.memoize.reset_on_stop
def prompt_hook_on_stop(*a):
pwndbg.commands.context.context()
@pwndbg.config.Trigger([message.config_prompt_color, disable_colors])
def set_prompt():
prompt = "pwndbg> "
if not disable_colors:
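        # \x01 (SOH) and \x02 (STX) bracket the non-printing color codes so
        # readline can ignore them when measuring the prompt's width.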
prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH
prompt = message.prompt(prompt)
prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
gdb.execute('set prompt %s' % prompt)
if pwndbg.events.before_prompt_event.is_real_event:
gdb.prompt_hook = prompt_hook
else:
    # Old GDBs don't have gdb.events.before_prompt, so we emulate it using gdb.prompt_hook
def extended_prompt_hook(*a):
pwndbg.events.before_prompt_event.invoke_callbacks()
return prompt_hook(*a)
gdb.prompt_hook = extended_prompt_hook
| {
"content_hash": "a0906047a89581f899b9df411ccd1753",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 115,
"avg_line_length": 28.205882352941178,
"alnum_prop": 0.6856100104275287,
"repo_name": "0xddaa/pwndbg",
"id": "7645310abe1968b1e29b6f113b31192b2c84b4de",
"size": "1964",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "pwndbg/prompt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Makefile",
"bytes": "964"
},
{
"name": "Python",
"bytes": "1920581"
},
{
"name": "Shell",
"bytes": "5598"
}
],
"symlink_target": ""
} |
'''
This example shows how to define a custom function.
The function must return two values, the timestamp in microseconds
and the signal value. The function documentation is used to define
a label or name for the signal.
'''
import numpy
def myFunction(msg):
'''velocity magnitude'''
return msg.utime, numpy.linalg.norm(msg.vel)
addSignalFunction('POSE_BODY', myFunction)
| {
"content_hash": "22579e7f0497e6795c4933e192cff6b5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 24.1875,
"alnum_prop": 0.751937984496124,
"repo_name": "openhumanoids/signal-scope",
"id": "614122b6390fcb22c741634a61297f12b03aeb1f",
"size": "387",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/example3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "110357"
},
{
"name": "CMake",
"bytes": "44796"
},
{
"name": "Makefile",
"bytes": "3270"
},
{
"name": "Python",
"bytes": "7087"
},
{
"name": "Shell",
"bytes": "1412"
}
],
"symlink_target": ""
} |
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': project_path('src', 'apnea', 'db.sqlite3'),
}
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
COMPRESS_ENABLED = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
INTERNAL_IPS = ('127.0.0.1',)  # trailing comma makes this a one-element tuple, not a bare string
SECRET_KEY = "|-88pPZr)McRp]x^2zECo0f32jH:z$MQBUD!A+EWS>d66]fyp4"
ALLOWED_HOSTS = [u'localhost', u'localhost:8000', u'apnea.dev']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MEDIA_ROOT = project_path('media')
STATIC_ROOT = project_path('static') | {
"content_hash": "88d6c172f51cc9e1c0e8f02cbb6d70d7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 22.625,
"alnum_prop": 0.6837016574585635,
"repo_name": "GotlingSystem/apnea",
"id": "953424cc6f2a710c5f0060736171e36738e5e337",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apnea/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60340"
},
{
"name": "JavaScript",
"bytes": "2161"
},
{
"name": "Python",
"bytes": "45697"
}
],
"symlink_target": ""
} |
import socket
import subprocess
import sys
import time
from airflow import configuration, LoggingMixin
NEED_KRB181_WORKAROUND = None
log = LoggingMixin().log
def renew_from_kt():
# The config is specified in seconds. But we ask for that same amount in
# minutes to give ourselves a large renewal buffer.
renewal_lifetime = "%sm" % configuration.getint('kerberos', 'reinit_frequency')
principal = configuration.get('kerberos', 'principal').replace("_HOST", socket.getfqdn())
cmdv = [configuration.get('kerberos', 'kinit_path'),
"-r", renewal_lifetime,
"-k", # host ticket
"-t", configuration.get('kerberos', 'keytab'), # specify keytab
"-c", configuration.get('kerberos', 'ccache'), # specify credentials cache
principal]
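    # Illustrative only (paths, principal and frequency below are hypothetical):
    # with reinit_frequency = 3600 the command built above resolves to roughly
    #   kinit -r 3600m -k -t /etc/airflow.keytab -c /tmp/airflow_krb5_ccache airflow/host.example.com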
log.info("Reinitting kerberos from keytab: " + " ".join(cmdv))
subp = subprocess.Popen(cmdv,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=-1,
universal_newlines=True)
subp.wait()
if subp.returncode != 0:
log.error("Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s" % (
subp.returncode,
"\n".join(subp.stdout.readlines()),
"\n".join(subp.stderr.readlines())))
sys.exit(subp.returncode)
global NEED_KRB181_WORKAROUND
if NEED_KRB181_WORKAROUND is None:
NEED_KRB181_WORKAROUND = detect_conf_var()
if NEED_KRB181_WORKAROUND:
        # (From: HUE-640). Kerberos clocks have seconds-level granularity. Make sure
        # we renew the ticket after the initial valid time.
time.sleep(1.5)
perform_krb181_workaround()
def perform_krb181_workaround():
cmdv = [configuration.get('kerberos', 'kinit_path'),
"-c", configuration.get('kerberos', 'ccache'),
"-R"] # Renew ticket_cache
log.info("Renewing kerberos ticket to work around kerberos 1.8.1: " +
" ".join(cmdv))
ret = subprocess.call(cmdv, close_fds=True)
if ret != 0:
principal = "%s/%s" % (configuration.get('kerberos', 'principal'), socket.getfqdn())
fmt_dict = dict(princ=principal,
                        ccache=configuration.get('kerberos', 'ccache'))
log.error("Couldn't renew kerberos ticket in order to work around "
"Kerberos 1.8.1 issue. Please check that the ticket for "
"'%(princ)s' is still renewable:\n"
" $ kinit -f -c %(ccache)s\n"
"If the 'renew until' date is the same as the 'valid starting' "
"date, the ticket cannot be renewed. Please check your KDC "
"configuration, and the ticket renewal policy (maxrenewlife) "
"for the '%(princ)s' and `krbtgt' principals." % fmt_dict)
sys.exit(ret)
def detect_conf_var():
"""Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it.
"""
ticket_cache = configuration.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read()
def run():
if configuration.get('kerberos', 'keytab') is None:
log.debug("Keytab renewer not starting, no keytab configured")
sys.exit(0)
while True:
renew_from_kt()
time.sleep(configuration.getint('kerberos', 'reinit_frequency'))
| {
"content_hash": "c14d5ecefa3849c0b9874a727c8a901c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 93,
"avg_line_length": 38.958333333333336,
"alnum_prop": 0.6005347593582888,
"repo_name": "yati-sagade/incubator-airflow",
"id": "55566a0c16344d2e4b6f35a3f509b38ecf4bbe25",
"size": "4532",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/security/kerberos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152530"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2828163"
},
{
"name": "Shell",
"bytes": "34436"
}
],
"symlink_target": ""
} |
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' transfer the given module name, plus the async module, then run it '''
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
# shell and command module are the same
if module_name == 'shell':
module_name = 'command'
module_args += " #USE_SHELL"
if "tmp" not in tmp:
tmp = self.runner._make_tmp_path(conn)
(module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)
return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
async_module=module_path,
async_jid=self.runner.generated_jid,
async_limit=self.runner.background,
inject=inject
)
| {
"content_hash": "87df532f439bdb3d011e96de9ee078cf",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 143,
"avg_line_length": 38.74193548387097,
"alnum_prop": 0.6286427976686095,
"repo_name": "marcusramberg/dotfiles",
"id": "dc53d6fa6cb49095340e9c760241e9cdbb29532a",
"size": "1915",
"binary": false,
"copies": "141",
"ref": "refs/heads/main",
"path": "bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/runner/action_plugins/async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "4704"
},
{
"name": "Emacs Lisp",
"bytes": "66056"
},
{
"name": "JavaScript",
"bytes": "11846"
},
{
"name": "Jinja",
"bytes": "285"
},
{
"name": "Lua",
"bytes": "136578"
},
{
"name": "Nix",
"bytes": "9136"
},
{
"name": "Perl",
"bytes": "8914"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "9699218"
},
{
"name": "Ruby",
"bytes": "24218"
},
{
"name": "Shell",
"bytes": "416759"
},
{
"name": "Vim Script",
"bytes": "4033"
}
],
"symlink_target": ""
} |
from unittest import mock
import unittest
from pydoof.search_api.search import query, suggest
from pydoof.search_api.search import QueryNames, SearchFilterExecution
class TestSearch(unittest.TestCase):
@mock.patch('pydoof.search_api.search.SearchAPIClient')
def test_minimum_requirements_query(self, APIClientMock):
hashid = 'aab32d8'
query(hashid, 'QUERY')
APIClientMock.return_value.get.assert_called_once_with(
f'/6/{hashid}/_search',
query_params={'query': 'QUERY'}
)
@mock.patch('pydoof.search_api.search.SearchAPIClient')
def test_query(self, APIClientMock):
hashid = 'aab32d8'
indices = ['product', 'another_index']
facets = [{'field': 'brand', 'size': 10}, {'field': 'price'}]
session_id = 'SESSION_ID'
skip_to_facet = ['TOP_FACET0', 'TOP_FACET1']
skip_auto_filters = ['AUTO_FILTER0', 'AUTO_FILTER1']
page = 1
rpp = 10
query(
hashid, 'QUERY', filter={'brand': 'MyBrand'},
exclude={'color': ['blue', 'red'], 'size': 'M',
'price': {'gte': 4.36, 'lt': 99}},
facets=facets, session_id=session_id, indices=indices, query_name=QueryNames.MATCH_AND,
sort=[{'brand': 'asc'}], page=page, rpp=rpp, stats=True, filter_execution=SearchFilterExecution.OR,
skip_top_facet=skip_to_facet,
skip_auto_filters=skip_auto_filters
)
APIClientMock.return_value.get.assert_called_once_with(
f'/6/{hashid}/_search',
query_params={'query': 'QUERY',
'filter[brand]': 'MyBrand',
'exclude[color][]': ['blue', 'red'],
'exclude[size]': 'M',
'exclude[price][gte]': 4.36,
'exclude[price][lt]': 99,
'facets[0][field]': 'brand',
'facets[0][size]': 10,
'facets[1][field]': 'price',
'indices[]': indices,
'session_id': session_id,
'query_name': 'match_and',
'sort[0][brand]': 'asc',
'page': page, 'rpp': rpp,
'stats': 'true',
'filter_execution': 'or',
'skip_top_facet[]': skip_to_facet,
'skip_auto_filters[]': skip_auto_filters}
)
@mock.patch('pydoof.search_api.search.SearchAPIClient')
def test_suggest(self, APIClientMock):
hashid = 'aab32d8'
indices = ['product', 'another_index']
suggest(
hashid, 'QUERY', indices, stats=False
)
APIClientMock.return_value.get.assert_called_once_with(
f'/6/{hashid}/_suggest',
query_params={'query': 'QUERY',
'indices[]': indices,
'stats': 'false'}
)
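# As the expected query_params above illustrate, the client flattens nested
# keyword arguments into Doofinder-style bracketed parameters: dicts become
# keys such as 'exclude[price][gte]', lists become repeated '...[]' keys, and
# enums like QueryNames.MATCH_AND / SearchFilterExecution.OR serialize to
# their string values ('match_and', 'or').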
| {
"content_hash": "9ba3c00f70296b22e0f5af7339b462ae",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 111,
"avg_line_length": 39,
"alnum_prop": 0.4960552268244576,
"repo_name": "doofinder/pydoof",
"id": "7d70333c5fb08b990f293f2f2cc25e7bffee3601",
"size": "3042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/search_api/test_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89388"
}
],
"symlink_target": ""
} |
import datasource, datepicker, dropdownlist, grid
import themeroller
# TS currently does not expose our imports, so we redeclare them explicitly
# so that clients can call e.g. pykendo.DataSource(...)
class Grid(grid.Grid) : pass
class DataSource(datasource.DataSource) : pass
class DatePicker(datepicker.DatePicker) : pass
class DropDownList(dropdownlist.DropDownList): pass
# high level widgets
class ThemeRoller(themeroller.ThemeRoller) : pass
__pragma__('alias', 'jq', '$')
from tools import d
class NestedDataGrid:
base_url = None
def __init__(self, opts, selector):
self.selector = selector
self.base_url = opts.base_url
read = datasource.read(self, self.base_url, self.got_data)
self.data_source = DataSource(d(read=read))
def got_data(self, result, mode, opts):
self.schema = d = result.data.post
self.type
# make the datasource happy:
opts.success(d)
def error(self, result):
import pdb; pdb.set_trace()
| {
"content_hash": "1d78d11aa1cb9d2e76d663b2c203a24b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 29.13888888888889,
"alnum_prop": 0.6625357483317446,
"repo_name": "axiros/misc_transcrypt",
"id": "62792f0a29a3bea55112ba3f5de74cfd39dd45a0",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/kendo/src/ch5/pykendo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43"
}
],
"symlink_target": ""
} |
import argparse
import rauth
import pandas as pd
import ast
import csv
import json
import pprint
import sys
import urllib
import urllib2
import time
import oauth2
import collections
from itertools import izip
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
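# Quick illustrative check of flatten() (not part of the original script):
# flatten({'a': {'b': 1}, 'c': 2}) returns {'a_b': 1, 'c': 2}, i.e. nested
# keys are joined with the separator into a single flat dictionary.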
def get_search_parameters(location, offset):
#See the Yelp API for more details
params = {}
params["term"] = "restaurant"
params["location"] = str(location)
params["limit"] = "20"
params["offset"] = str(offset)
return params
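# For example, get_search_parameters(94102, 0) builds
# {'term': 'restaurant', 'location': '94102', 'limit': '20', 'offset': '0'}.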
# OAuth credential placeholders that must be filled in by users.
def get_results(params):
CONSUMER_KEY = 'uJ0d4zqfYb2-HPcRWHQMeQ'
CONSUMER_SECRET = 'gGL7EWVXQQXNzdTaO1ecR-5rC0c'
TOKEN = 'HQfCLQx5llcGq8uPaEFNTYouKnr-Mtmu'
TOKEN_SECRET = 'dIajmSm-h9ag-0DLKY8Uo7fh_fk'
session = rauth.OAuth1Session(
consumer_key = CONSUMER_KEY
,consumer_secret = CONSUMER_SECRET
,access_token = TOKEN
,access_token_secret = TOKEN_SECRET)
request = session.get("http://api.yelp.com/v2/search",params=params)
#Transforms the JSON API response into a Python dictionary
data = request.json()
if not data:
return None
for k in data:
if 'error' in k:
return None
session.close()
return data
def main():
locations = [94102,94103, 94104, 94105, 94107, 94108, 94109, 94110, 94111, 94112, 94114, 94115, 94116, 94117, 94118, 94121, 94122, 94123, 94124, 94127, 94129, 94130, 94131, 94132, 94133, 94134, 94158]
api_calls = []
for location in locations:
for i in range(0, 1000, 20):
params = get_search_parameters(location, offset=i)
response = get_results(params)
if not response:
continue
            api_calls.append(response)
            # Be a good internet citizen and rate-limit yourself
            time.sleep(1.0)  # delay value is an arbitrary choice; tune as needed
# with open('yelp_raw.txt', 'w') as file:
# json.dumps(response, file)
# with open("yelp_raw.txt", "r") as file:
# yelps = json.load(file)
# for yelp in yelps:
# yelp_data.append(yelp)
    yelp_data = []
for i in api_calls:
yelp_data.append(i['businesses'])
yelp_data = [item for sublist in yelp_data for item in sublist]
yelps = pd.DataFrame()
yelps['id'] = map(lambda yelp: (yelp['id']), yelp_data)
yelps['name'] = map(lambda yelp:(yelp['name']), yelp_data)
yelps['is_closed'] = map(lambda yelp:(yelp['is_closed']), yelp_data)
yelps['image_url'] = map(lambda yelp:(yelp.get('image_url')), yelp_data)
yelps['url'] = map(lambda yelp:(yelp['url']), yelp_data)
yelps['mobile_url'] = map(lambda yelp:(yelp['mobile_url']), yelp_data)
yelps['phone'] = map(lambda yelp:(yelp.get('phone')),yelp_data)
yelps['display_phone'] = map(lambda yelp:(str(yelp.get('display_phone')))[1:],yelp_data)
yelps['address'] = map(lambda yelp:(str(yelp['location']['address'])[3:-2]), yelp_data)
yelps['display_address'] = map(lambda yelp:(yelp['location']['display_address'][0]), yelp_data)
yelps['coordinate'] = (map(lambda yelp:((ast.literal_eval(str(yelp['location'].get(u'coordinate'))))), yelp_data))
yelps['review_count'] = map(lambda yelp:(yelp.get('review_count')), yelp_data)
yelps['categories'] = map(lambda yelp:((yelp.get(u'categories'))), yelp_data)
yelps['rating'] = map(lambda yelp:(yelp['rating']), yelp_data)
yelps['rating_image_url'] = map(lambda yelp:(yelp['rating_img_url']), yelp_data)
yelps['rating_image_url_small'] = map(lambda yelp:(yelp['rating_img_url_small']), yelp_data)
yelps['rating_image_url_large'] = map(lambda yelp:(yelp['rating_img_url_large']), yelp_data)
yelps['snippet_text'] = map(lambda yelp:(yelp.get('snippet_text')), yelp_data)
yelps['snippet_image_url'] = map(lambda yelp:(yelp.get('snippet_image_url')), yelp_data)
yelps.to_csv('ratings1.csv', index=False, encoding='utf-8')
if __name__ == "__main__":
main() | {
"content_hash": "04b5b3bab9b893bafedb559a2aa87349",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 202,
"avg_line_length": 37.30275229357798,
"alnum_prop": 0.6625676340383669,
"repo_name": "vjadon/Front-End-workspace",
"id": "14c579c3e571b9962ce6cdbb609839a4c76a92d9",
"size": "4090",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LHB - ETL Module/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "12842"
},
{
"name": "Python",
"bytes": "11434"
},
{
"name": "Web Ontology Language",
"bytes": "24969"
}
],
"symlink_target": ""
} |
"""UWUM (Unified WeGovNow User Management) provider for django-allauth."""
from os import path
from setuptools import setup, find_packages
name = 'django-allauth-uwum'
version = __import__('allauth_uwum').__version__
repository = path.join('https://github.com/ExCiteS', name)
setup(
name=name,
version=version,
description='UWUM provider for django-allauth',
url=repository,
download_url=path.join(repository, 'tarball', version),
author='Julius Osokinas',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=['django-allauth >= 0.27.0'],
classifiers=[
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
],
)
| {
"content_hash": "4da9a11c2bd848a17f9206596faa0707",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6410496719775071,
"repo_name": "ExCiteS/django-allauth-uwum",
"id": "140ad3b3368a7ce01f8d655369f310dba8e0ecfa",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8773"
}
],
"symlink_target": ""
} |
import httplib,urllib
import random
import time
import datetime
from threading import Thread
def get_url():
switch_dict={
0:'hadoop',
1:'spark',
2:'apple',
3:'hello',
4:'mapreduce',
5:'java',
6:'python',
7:'PHP',
8:'Perl',
9:'Bash'
}
i = int((random.random() * 100) % 10)
url = 'http://localhost:8765'
url = url+'/'+switch_dict[i]
return url
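# Example (illustrative): get_url() might return 'http://localhost:8765/spark',
# appending one of the ten keywords above at random.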
# Multi-threaded operation: define a request worker class
class HttpRequest(Thread):
def __init__(self):
Thread.__init__(self)
def http_request(self):
try:
url = get_url()
res = urllib.urlopen(url)
#print 'Get ',res.geturl()
res.close()
except:
pass
def run(self):
self.http_request()
def multi_threads(threads_num):
for i in range(threads_num):
worker = HttpRequest()
worker.daemon = True
worker.start()
startTime = datetime.datetime.now()
print 'Generating one million lines of log data'
print 'Task started at: ',startTime
multi_threads(1000000)
endTime = datetime.datetime.now()
print 'Task ended at: ',endTime
print ' Total time spent: ',(endTime-startTime).total_seconds(),'s'
| {
"content_hash": "f43735db5f459f1a2ea98ecb83c4249a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 57,
"avg_line_length": 18.96153846153846,
"alnum_prop": 0.6561866125760649,
"repo_name": "tianqichongzhen/ProgramPrac",
"id": "d1ddd03f4a17274d93886f9ffe80f8b81b3c6474",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercise/Python/http_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "91522"
},
{
"name": "C++",
"bytes": "398"
},
{
"name": "Jupyter Notebook",
"bytes": "5183728"
},
{
"name": "Makefile",
"bytes": "810"
},
{
"name": "Python",
"bytes": "300422"
},
{
"name": "R",
"bytes": "21095"
},
{
"name": "Shell",
"bytes": "2619"
},
{
"name": "TeX",
"bytes": "4222"
}
],
"symlink_target": ""
} |
import string
def ignore_punctuation_spaces_case(text):
return "".join(i.lower() for i in text if i in string.ascii_letters)
def reverse(text):
return text[::-1]
def is_palindrome(text):
text = ignore_punctuation_spaces_case(text)
return text == reverse(text)
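# Example: is_palindrome("Rise to vote, sir.") is True, because after stripping
# punctuation, spaces and case the text reduces to "risetovotesir", which reads
# the same reversed.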
something = raw_input("Enter text: ")
if is_palindrome(something):
print "Yes, it is a plindrome"
else:
print "No, it is not a palindrome"
| {
"content_hash": "e73edd495ec6e87edf150304c51efcea",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 25.11764705882353,
"alnum_prop": 0.6908665105386417,
"repo_name": "pezy/python_test",
"id": "84c8a7106e3d249e3a71642c577663490a600ce5",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byteofpy/input.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21331"
}
],
"symlink_target": ""
} |
from stromx import runtime
import unittest
class ExceptionsTest(unittest.TestCase):
def setUp(self):
stream = runtime.Stream()
counter = stream.addOperator(runtime.Counter())
dump = stream.addOperator(runtime.Dump())
stream.initializeOperator(counter)
stream.initializeOperator(dump)
stream.connect(counter, 0, dump, 0)
thread = stream.addThread()
thread.addInput(dump, 0)
stream.setDelay(100)
self.stream = stream
self.dump = dump
self.counter = counter
def tearDown(self):
self.stream = None
def testExceptions(self):
s = runtime.Stream()
try:
s.pause()
except runtime.Exception as e:
print(str(e))
try:
s.pause()
except runtime.WrongState as e:
print(str(e))
i = -1
try:
s.pause()
except runtime.WrongId as e:
print(str(e))
i = 0
except runtime.WrongState as e:
print(str(e))
i = 1
except runtime.Exception as e:
print(str(e))
i = 2
except Exception as e:
print(str(e))
i = 3
self.assertEqual(1, i)
def testRemoveConnection(self):
self.assertRaises(runtime.WrongArgument,
self.stream.disconnect, self.dump, 1)
self.assertRaises(runtime.Exception,
self.stream.disconnect, None, 0)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4d238910247dbdad3873a713b1eba8a6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 63,
"avg_line_length": 24.924242424242426,
"alnum_prop": 0.5148936170212766,
"repo_name": "uboot/stromx",
"id": "6695991ab2895f00e47606cf5d2676fda0694c2e",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1770634"
},
{
"name": "CMake",
"bytes": "48497"
},
{
"name": "Python",
"bytes": "24787"
}
],
"symlink_target": ""
} |
from weboob.core.modules import ModuleLoadError
from weboob.exceptions import BrowserIncorrectPassword
from connector import Connector
import sys
def EnsurePrint(wat):
try:
print wat
return wat
except:
wat = unicode(wat).encode('utf-8')
print wat
class BaseBankHandler(object):
'''
Base class to handle utility methods.
'''
def __init__(self, login, password, website = None):
self.login = login
self.password = password
self.website = website
def load_from_connector(self, name, method_name):
'''
Load given connector (name) and apply the given method on it.
        Supported methods: get_balances and get_history.
Expected fields:
* login
* password
* website (optional)
'''
params_connector = {
'login': self.login,
'password': self.password,
}
if self.website is not None:
params_connector['website'] = self.website
try:
connector = Connector(name, params_connector)
results = {}
callback = getattr(connector, method_name)
results[name] = callback()
except ModuleLoadError:
raise Exception("Could not load module: %s" % name)
except BrowserIncorrectPassword:
raise Exception("Wrong credentials")
except Exception as e:
EnsurePrint(unicode(e))
raise Exception("Something went wrong (weboob modules should "
"probably be updated)")
return results
class BankHandler(BaseBankHandler):
"""
This handler is dedicated to retrieve data from bank accounts.
"""
def post(self, name):
"""
Grab data about all accounts from a given bank identifier.
Bank type is given as URL parameter, credentials are given in body.
For available bank type check: http://weboob.org/modules
"""
return self.load_from_connector(name, 'get_balances')
class BankHistoryHandler(BaseBankHandler):
"""
This handler is dedicated to retrieve transaction history of bank accounts.
"""
def post(self, name):
"""
Grab history of all accounts from a given bank identifier.
Bank type is given as URL parameter, credentials are given in body.
For available bank type check: http://weboob.org/modules
"""
return self.load_from_connector(name, 'get_history')
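# Usage sketch (hedged; the module name and credentials below are made up, not
# taken from this repository):
#   balances = BankHandler('user', 'password').post('somebankmodule')
#   history = BankHistoryHandler('user', 'password').post('somebankmodule')
# Each call returns a dict keyed by the module name, as built in
# load_from_connector above.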
| {
"content_hash": "52d7be08d0bff96fc4bb9dc798cf2915",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 27.813186813186814,
"alnum_prop": 0.6108257605689451,
"repo_name": "frankrousseau/kresus",
"id": "42f658a5c9991887549051ae898f99d39d8ef93e",
"size": "2531",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "weboob/py/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22644"
},
{
"name": "CoffeeScript",
"bytes": "87614"
},
{
"name": "HTML",
"bytes": "1473"
},
{
"name": "JavaScript",
"bytes": "151729"
},
{
"name": "Python",
"bytes": "5275"
},
{
"name": "Shell",
"bytes": "634"
}
],
"symlink_target": ""
} |
"""
===============================================================================
Original code copyright (C) 2009-2022 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Unit tests.**
"""
import logging
import unittest
from cardinal_pythonlib.rate_limiting import rate_limited
log = logging.getLogger(__name__)
@rate_limited(2)
def _test_print_2hz(num: int) -> None:
log.info(f"_test_print_2hz: {num}")
@rate_limited(5)
def _test_print_5hz(num: int) -> None:
log.info(f"_test_print_5hz: {num}")
def _test_print(num: int) -> None:
log.info(f"_test_print: {num}")
class RateLimitingTests(unittest.TestCase):
@staticmethod
def test_rate_limiter() -> None:
"""
Test the rate-limiting functions.
"""
n = 10
log.info("Via decorator, 2 Hz")
for i in range(1, n + 1):
_test_print_2hz(i)
log.info("Via decorator, 5 Hz")
for i in range(1, n + 1):
_test_print_5hz(i)
log.info("Created dynamically, 10 Hz")
tenhz = rate_limited(10)(_test_print)
for i in range(1, n + 1):
tenhz(i)
log.info("Created dynamically, unlimited")
unlimited = rate_limited(None)(_test_print)
for i in range(1, n + 1):
unlimited(i)
| {
"content_hash": "f44306269624b21d1c55fbe2032638b5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 28.985294117647058,
"alnum_prop": 0.5819381024860477,
"repo_name": "RudolfCardinal/pythonlib",
"id": "7e8a7752148e2d1ccdecfcab2a0c6816371d2d02",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardinal_pythonlib/tests/rate_limiting_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1987146"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
from pants.base.exceptions import TaskError
from pants.base.build_environment import get_scm
from pants.task.task import Task
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.util.contextutil import temporary_dir
import os
import stat
import subprocess
import logging
import pystache
from kubernetes import config, client
logger = logging.getLogger(__name__)
CLUSTER_SPECIFIC_METADATA = [
'creationTimestamp',
'resourceVersion',
'uid',
'selfLink']
class K8SCloneNamespace(Task):
@classmethod
def register_options(cls, register):
branch = get_scm().branch_name
register('--namespace', type=str, default=branch,
help='Namespace to create.')
super(K8SCloneNamespace, cls).register_options(register)
@property
def new_namespace(self):
return self.get_options().namespace
def execute(self):
config.load_kube_config()
v1 = client.CoreV1Api()
beta = client.ExtensionsV1beta1Api()
# TODO: Make idempotent!
self.create_namespace(v1)
self.clone_configmaps(v1)
self.clone_services(v1)
self.clone_secrets(v1)
self.clone_deployments(beta)
def create_namespace(self, k8s):
found = False
for ns in k8s.list_namespace().items:
found = found or ns.metadata.name == self.new_namespace
if not found:
k8s.create_namespace({'metadata': {'name': self.new_namespace}})
def clone_services(self, k8s):
for service in k8s.list_namespaced_service('devo').items:
if service.spec.type == 'NodePort':
logger.warning('Ignoring NodePort service %s' % service.metadata.name)
continue
service.metadata = { 'name': service.metadata.name, 'namespace': self.new_namespace }
service.spec.cluster_ip = None
k8s.create_namespaced_service(self.new_namespace, service)
def clone_secrets(self, k8s):
for secret in k8s.list_namespaced_secret('devo').items:
if secret.type != 'Opaque':
continue
secret.metadata = { 'name': secret.metadata.name, 'namespace': self.new_namespace }
k8s.create_namespaced_secret(self.new_namespace, secret)
def clone_deployments(self, k8s):
for deploy in k8s.list_namespaced_deployment('devo').items:
deploy.metadata = { 'name': deploy.metadata.name, 'labels': deploy.metadata.labels, 'namespace': self.new_namespace }
deploy.status = {}
deploy.spec.template.metadata.creation_timestamp = None
k8s.create_namespaced_deployment(self.new_namespace, deploy)
def clone_configmaps(self, k8s):
for configmap in k8s.list_namespaced_config_map('devo').items:
configmap.metadata = { 'name': configmap.metadata.name, 'namespace': self.new_namespace }
k8s.create_namespaced_config_map(self.new_namespace, configmap)
def rinse_metadata(self, obj):
meta = obj.metadata.to_dict()
meta['namespace'] = self.new_namespace
for attr in CLUSTER_SPECIFIC_METADATA:
meta.pop(attr, None)
obj.metadata = meta
| {
"content_hash": "0d1ccd0b2188c2bd8db0080d8a4d8fcd",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 123,
"avg_line_length": 33.64772727272727,
"alnum_prop": 0.7041540020263425,
"repo_name": "toddgardner/pants-plugins",
"id": "d8ebd7604c67f378bd4173f9f805ef3b977d13c9",
"size": "2961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/verst/pants/k8s/k8s_clone_namespace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "72116"
},
{
"name": "Scala",
"bytes": "8201"
},
{
"name": "Shell",
"bytes": "7886"
}
],
"symlink_target": ""
} |
from hwt.hdl.constants import DIRECTION
from hwt.interfaces.std import Signal, VectSignal
from hwt.math import log2ceil
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
class LocalLink(Interface):
"""
Stream with "byte enable" and "start/end of frame/packet"
.. hwt-autodoc::
"""
def _config(self):
self.DATA_WIDTH = Param(32)
def _declr(self):
self.data = VectSignal(self.DATA_WIDTH)
self.rem = VectSignal(log2ceil(self.DATA_WIDTH // 8))
self.src_rdy_n = Signal()
self.dst_rdy_n = Signal(masterDir=DIRECTION.IN)
self.sof_n = Signal()
self.eof_n = Signal()
self.eop_n = Signal()
self.sop_n = Signal()
| {
"content_hash": "6071f2e8e6f67d2499d280d3acf24453",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 61,
"avg_line_length": 28.5,
"alnum_prop": 0.6464237516869096,
"repo_name": "Nic30/hwtLib",
"id": "4dc01c2b2cb5975a67b710c5aee2ccdffd4e39c7",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/xilinx/locallink/intf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
'''Test the taq.processing module'''
import pytest
from pytest import mark
@mark.xfail
def test_split_chunks():
raise NotImplementedError
if __name__ == '__main__':
    pytest.main(["test_processing.py"])
| {
"content_hash": "09b6a3accbd5d6015f0c67e4b414d12f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 37,
"avg_line_length": 17.583333333333332,
"alnum_prop": 0.6872037914691943,
"repo_name": "dlab-projects/python-taq",
"id": "84f1d2ee5b0c274358953104255bec1493961dcc",
"size": "211",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_processing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4751"
},
{
"name": "Python",
"bytes": "38066"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
@register.inclusion_tag("curator/widget_block_head.html")
def widget_tag_head(widget, fullscreen=0):
#widget_data, time_intervals = widget.data_list()
return {
'widget': widget,
'model_name': widget.get_model_display(),
'fullscreen': fullscreen,
#'time_intervals': time_intervals,
#'data_points': widget_data,
}
@register.inclusion_tag("curator/widget_block_body.html")
def widget_tag_body(widget):
return {
'widget': widget,
}
| {
"content_hash": "4aa37819dd26bac8ead9e8772e866e1e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 26.863636363636363,
"alnum_prop": 0.6142131979695431,
"repo_name": "spulec/django-curator",
"id": "290f1ed841715a4ae261459f99d4d2e62acc6b1c",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curator/templatetags/widget_tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "100719"
},
{
"name": "Python",
"bytes": "18039"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
from symptom_tracker.models import SymptomEntry, Symptom
def index(request):
symptoms = SymptomEntry.objects.all()
data = {}
data['symptoms'] = symptoms
return render(request, 'index.html', data)
def add_symptom_entry(request):
symptom = request.POST.get('symptom')
severity = request.POST.get('severity')
entry = SymptomEntry()
entry.symptom = symptom
entry.severity = severity
entry.save()
entries = SymptomEntry.objects.all()
return render(request,'symptom_list.html', {'symptoms': entries})
def symptom_manager(request):
symptoms = Symptom.objects.all()
data = {}
data['symptoms'] = symptoms
return render(request, 'symptom_manager.html', data)
def add_symptom(request):
name = request.POST.get('name')
symptom = Symptom(name=name)
symptom.save()
symptoms = Symptom.objects.all()
return render(request, 'symptom_rows.html', {'symptoms': symptoms}) | {
"content_hash": "85f720ef59d0a6e22b62b1bd04cf4b5a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 23.790697674418606,
"alnum_prop": 0.6881720430107527,
"repo_name": "sbelskie/symplicity",
"id": "9792383abe26de72c2d9412e23c420621b4dda8a",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symptom_tracker/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "165"
},
{
"name": "JavaScript",
"bytes": "2027"
},
{
"name": "Python",
"bytes": "16495"
}
],
"symlink_target": ""
} |
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QApplication
from sakia.models.generic_tree import GenericTreeModel
from sakia.data.processors import ContactsProcessor
from duniterpy.documents.crc_pubkey import CRCPubkey
class NavigationModel(QObject):
"""
The model of Navigation component
"""
navigation_changed = pyqtSignal(GenericTreeModel)
def __init__(self, parent, app):
"""
:param sakia.gui.component.controller.ComponentController parent:
:param sakia.app.Application app:
"""
super().__init__(parent)
self.app = app
self.navigation = []
self._current_data = None
self._contacts_processor = ContactsProcessor.instanciate(self.app)
def handle_identity_change(self, identity):
for node in self.navigation[3]['children']:
if node['component'] == "Informations":
connection = node["misc"]["connection"]
if connection.pubkey == identity.pubkey and connection.uid == identity.uid:
icon = self.identity_icon(connection)
node["icon"] = icon
return node
def init_navigation_data(self):
self.navigation = [
{
'title': self.tr('Network'),
'icon': ':/icons/network_icon',
'component': "Network",
'dependencies': {
'network_service': self.app.network_service,
},
'misc': {
},
'children': []
},
{
'title': self.tr('Identities'),
'icon': ':/icons/members_icon',
'component': "Identities",
'dependencies': {
'blockchain_service': self.app.blockchain_service,
'identities_service': self.app.identities_service,
},
'misc': {
}
},
{
'title': self.tr('Web of Trust'),
'icon': ':/icons/wot_icon',
'component': "Wot",
'dependencies': {
'blockchain_service': self.app.blockchain_service,
'identities_service': self.app.identities_service,
},
'misc': {
}
},
{
'title': self.tr('Personal accounts'),
'children': []
}
]
self._current_data = self.navigation[0]
for connection in self.app.db.connections_repo.get_all():
self.navigation[3]['children'].append(self.create_node(connection))
try:
self._current_data = self.navigation[0]
except IndexError:
self._current_data = None
return self.navigation
def create_node(self, connection):
matching_contact = self._contacts_processor.get_one(pubkey=connection.pubkey)
if matching_contact:
title = matching_contact.displayed_text()
else:
title = connection.title()
if connection.uid:
node = {
'title': title,
'component': "Informations",
'icon': self.identity_icon(connection),
'dependencies': {
'blockchain_service': self.app.blockchain_service,
'identities_service': self.app.identities_service,
'sources_service': self.app.sources_service,
'connection': connection,
},
'misc': {
'connection': connection
},
'children': [
{
'title': self.tr('Transfers'),
'icon': ':/icons/tx_icon',
'component': "TxHistory",
'dependencies': {
'connection': connection,
'identities_service': self.app.identities_service,
'blockchain_service': self.app.blockchain_service,
'transactions_service': self.app.transactions_service,
"sources_service": self.app.sources_service
},
'misc': {
'connection': connection
}
}
]
}
else:
node = {
'title': title,
'component': "TxHistory",
'icon': ':/icons/tx_icon',
'dependencies': {
'connection': connection,
'identities_service': self.app.identities_service,
'blockchain_service': self.app.blockchain_service,
'transactions_service': self.app.transactions_service,
"sources_service": self.app.sources_service
},
'misc': {
'connection': connection
},
'children': []
}
return node
def identity_icon(self, connection):
if self.identity_is_member(connection):
return ':/icons/member'
else:
return ':/icons/not_member'
def view_in_wot(self, connection):
identity = self.app.identities_service.get_identity(connection.pubkey, connection.uid)
self.app.view_in_wot.emit(identity)
def generic_tree(self):
return GenericTreeModel.create("Navigation", self.navigation[3]['children'])
def add_connection(self, connection):
raw_node = self.create_node(connection)
self.navigation[3]["children"].append(raw_node)
return raw_node
def set_current_data(self, raw_data):
self._current_data = raw_data
def current_data(self, key):
return self._current_data.get(key, None)
def _lookup_raw_data(self, raw_data, component, **kwargs):
if raw_data['component'] == component:
if kwargs:
for k in kwargs:
if raw_data['misc'].get(k, None) == kwargs[k]:
return raw_data
else:
return raw_data
for c in raw_data.get('children', []):
children_data = self._lookup_raw_data(c, component, **kwargs)
if children_data:
return children_data
def get_raw_data(self, component, **kwargs):
for data in self.navigation:
raw_data = self._lookup_raw_data(data, component, **kwargs)
if raw_data:
return raw_data
def current_connection(self):
if self._current_data:
return self._current_data['misc'].get('connection', None)
else:
return None
def generate_revocation(self, connection, secret_key, password):
return self.app.documents_service.generate_revocation(connection, secret_key, password)
def identity_published(self, connection):
identity = self.app.identities_service.get_identity(connection.pubkey, connection.uid)
if identity:
return identity.written
else:
return False
def identity_is_member(self, connection):
identity = self.app.identities_service.get_identity(connection.pubkey, connection.uid)
if identity:
return identity.member
else:
return False
async def remove_connection(self, connection):
for data in self.navigation:
connected_to = self._current_data['misc'].get('connection', None)
if connected_to == connection:
try:
self._current_data['widget'].disconnect()
except TypeError as e:
if "disconnect()" in str(e):
pass
else:
raise
await self.app.remove_connection(connection)
async def send_leave(self, connection, secret_key, password):
return await self.app.documents_service.send_membership(connection, secret_key, password, "OUT")
async def send_identity(self, connection, identity_doc):
return await self.app.documents_service.broadcast_identity(connection, identity_doc)
def generate_identity(self, connection):
return self.app.documents_service.generate_identity(connection)
def update_identity(self, identity):
self.app.identities_service.insert_or_update_identity(identity)
def notifications(self):
return self.app.parameters.notifications
@staticmethod
def copy_pubkey_to_clipboard(connection):
clipboard = QApplication.clipboard()
clipboard.setText(connection.pubkey)
@staticmethod
def copy_pubkey_to_clipboard_with_crc(connection):
clipboard = QApplication.clipboard()
clipboard.setText(str(CRCPubkey.from_pubkey(connection.pubkey)))
| {
"content_hash": "230e368ebd2197ebfa7398da7ac18491",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 104,
"avg_line_length": 37.10612244897959,
"alnum_prop": 0.5318446815531844,
"repo_name": "ucoin-io/cutecoin",
"id": "d77cfcd046355b6007fddfca0737811c8ee24ae8",
"size": "9091",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sakia/gui/navigation/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2475"
},
{
"name": "JavaScript",
"bytes": "1594"
},
{
"name": "PowerShell",
"bytes": "3111"
},
{
"name": "Python",
"bytes": "718811"
},
{
"name": "Shell",
"bytes": "3983"
}
],
"symlink_target": ""
} |
"""
WSGI config for Cookie Cutter Demo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "3343c514f4a6740e0405aa04333e289f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 47.72222222222222,
"alnum_prop": 0.7904540162980209,
"repo_name": "geoanalytic/gpstracker",
"id": "c31dda999fb070c6cfeda2822937cc600924ac8f",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15187"
},
{
"name": "HTML",
"bytes": "21885"
},
{
"name": "JavaScript",
"bytes": "5087"
},
{
"name": "Nginx",
"bytes": "1275"
},
{
"name": "Python",
"bytes": "61626"
},
{
"name": "Shell",
"bytes": "7274"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import mt
from . import state
from . import subTLVs
from . import undefined_subtlvs
class prefix(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: List of IPv6 prefixes contained within MT reachability TLV.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__mt",
"__state",
"__subTLVs",
"__undefined_subtlvs",
)
_yang_name = "prefix"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__mt = YANGDynClass(
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-ipv6-reachability",
"prefixes",
"prefix",
]
def _get_mt(self):
"""
Getter method for mt, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/mt (container)
YANG Description: Multi-topology parameters
"""
return self.__mt
def _set_mt(self, v, load=False):
"""
Setter method for mt, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/mt (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mt() directly.
YANG Description: Multi-topology parameters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mt must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=mt.mt, is_container='container', yang_name="mt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__mt = t
if hasattr(self, "_set"):
self._set()
def _unset_mt(self):
self.__mt = YANGDynClass(
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/state (container)
YANG Description: State parameters of IPv6 prefix attributes
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IPv6 prefix attributes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs (container)
YANG Description: This container describes IS prefix sub-TLVs.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: This container describes IS prefix sub-TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subTLVs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=subTLVs.subTLVs, is_container='container', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__subTLVs = t
if hasattr(self, "_set"):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_undefined_subtlvs(self):
"""
Getter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/undefined_subtlvs (container)
YANG Description: This container describes undefined ISIS TLVs.
"""
return self.__undefined_subtlvs
def _set_undefined_subtlvs(self, v, load=False):
"""
Setter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/undefined_subtlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_undefined_subtlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_undefined_subtlvs() directly.
YANG Description: This container describes undefined ISIS TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """undefined_subtlvs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=undefined_subtlvs.undefined_subtlvs, is_container='container', yang_name="undefined-subtlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__undefined_subtlvs = t
if hasattr(self, "_set"):
self._set()
def _unset_undefined_subtlvs(self):
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
mt = __builtin__.property(_get_mt)
state = __builtin__.property(_get_state)
subTLVs = __builtin__.property(_get_subTLVs)
undefined_subtlvs = __builtin__.property(_get_undefined_subtlvs)
_pyangbind_elements = OrderedDict(
[
("mt", mt),
("state", state),
("subTLVs", subTLVs),
("undefined_subtlvs", undefined_subtlvs),
]
)
from . import mt
from . import state
from . import subTLVs
from . import undefined_subtlvs
class prefix(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: List of IPv6 prefixes contained within MT reachability TLV.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__mt",
"__state",
"__subTLVs",
"__undefined_subtlvs",
)
_yang_name = "prefix"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__mt = YANGDynClass(
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-ipv6-reachability",
"prefixes",
"prefix",
]
def _get_mt(self):
"""
Getter method for mt, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/mt (container)
YANG Description: Multi-topology parameters
"""
return self.__mt
def _set_mt(self, v, load=False):
"""
Setter method for mt, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/mt (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mt() directly.
YANG Description: Multi-topology parameters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mt must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=mt.mt, is_container='container', yang_name="mt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__mt = t
if hasattr(self, "_set"):
self._set()
def _unset_mt(self):
self.__mt = YANGDynClass(
base=mt.mt,
is_container="container",
yang_name="mt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/state (container)
YANG Description: State parameters of IPv6 prefix attributes
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IPv6 prefix attributes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs (container)
YANG Description: This container describes IS prefix sub-TLVs.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: This container describes IS prefix sub-TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subTLVs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=subTLVs.subTLVs, is_container='container', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__subTLVs = t
if hasattr(self, "_set"):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_undefined_subtlvs(self):
"""
Getter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/undefined_subtlvs (container)
YANG Description: This container describes undefined ISIS TLVs.
"""
return self.__undefined_subtlvs
def _set_undefined_subtlvs(self, v, load=False):
"""
Setter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/undefined_subtlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_undefined_subtlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_undefined_subtlvs() directly.
YANG Description: This container describes undefined ISIS TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """undefined_subtlvs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=undefined_subtlvs.undefined_subtlvs, is_container='container', yang_name="undefined-subtlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__undefined_subtlvs = t
if hasattr(self, "_set"):
self._set()
def _unset_undefined_subtlvs(self):
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
mt = __builtin__.property(_get_mt)
state = __builtin__.property(_get_state)
subTLVs = __builtin__.property(_get_subTLVs)
undefined_subtlvs = __builtin__.property(_get_undefined_subtlvs)
_pyangbind_elements = OrderedDict(
[
("mt", mt),
("state", state),
("subTLVs", subTLVs),
("undefined_subtlvs", undefined_subtlvs),
]
)
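# --- Illustrative usage sketch (not part of the generated bindings; added for clarity) ---
# Per the docstrings above, these containers are read-only (config: false): values are
# read through the public properties, while a backend populates them via the private
# _set_* methods. The object names below are assumptions for illustration only.
#
#   p = prefix()
#   p._path()                       # -> the YANG path segments this node maps to
#   list(p._pyangbind_elements)     # -> ['mt', 'state', 'subTLVs', 'undefined_subtlvs']
#   p._set_state(populated_state)   # backend loads state data (populated_state is hypothetical)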
| {
"content_hash": "3873cc2dd7d89c4dbb99644d0853c47c",
"timestamp": "",
"source": "github",
"line_count": 834,
"max_line_length": 411,
"avg_line_length": 40.31175059952039,
"alnum_prop": 0.5825698988697204,
"repo_name": "napalm-automation/napalm-yang",
"id": "a926f1e506f66f4803a0a2f3a841571f9a73f1b0",
"size": "33644",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
"""
test solver using examples
"""
import json
import solver
import unittest
def get_status(source):
""" run program and get status
"""
with open(source, 'r') as ofile:
source = ofile.read()
res = solver.solve(source)
json_res = json.loads(res)
return json_res["result"]
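# A minimal smoke-test sketch using the unittest import above (assumption: added for
# illustration; the example path exists and solver.solve() returns JSON with a "result"
# key, as get_status() expects). Expected result values are not asserted here because
# they depend on the solver's output for each example.
#
#   class GetStatusSmokeTest(unittest.TestCase):
#       def test_returns_a_status(self):
#           self.assertTrue(get_status("./examples/sqlrewrites/CQExample0.cos"))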
def run():
print(get_status("./examples/temp.cos"))
#print(get_status("./examples/sqlrewrites/CQExample0.cos"))
#print(get_status("./examples/sqlrewrites/SelfJoin0.cos"))
#print(get_status("./examples/sqlrewrites/commutativeSelect.cos"))
#print(get_status("./examples/sqlrewrites/inlineCorrelatedSubqueries.cos"))
#print(get_status("./examples/sqlrewrites/projectionDistributesOverUnion.cos"))
#print(get_status("./examples/sqlrewrites/projectJoinTranspose.cos"))
#print(get_status("./examples/sqlrewrites/joinCommute.cos"))
#print(get_status("./examples/sqlrewrites/timesAndDiv.cos"))
#print(get_status("./examples/sqlrewrites/countProject.cos"))
#print(get_status("./examples/calcite/testAggregateConstantKeyRule2.cos"))
#print(get_status("./examples/calcite/testRemoveSemiJoinRightWithFilter.cos"))
#print(get_status("./examples/calcite/testAggregateGroupingSetsProjectMerge.cos"))
#print(get_status("./examples/sqlrewrites/aggOnExpr.cos"))
#print(get_status("./examples/calcite/testRemoveSemiJoinRight.cos"))
#print(get_status("./examples/sqlrewrites/havingToWhere.cos"))
#print(get_status("./examples/sqlrewrites/pullsubquery.cos"))
#print(get_status("./examples/inequal_queries/344-exam-1.cos"))
#print(get_status("./examples/inequal_queries/countbug.cos"))
#print(get_status("./examples/inequal_queries/inline-exists.cos"))
#print(get_status("./examples/inequal_queries/issue29.cos"))
#print(get_status("./examples/sqlrewrites/unionEmpty.cos"))
#print(get_status("./examples/inequal_queries/string_ex1.cos"))
if __name__ == '__main__':
run()
| {
"content_hash": "55ad31a5ba3d6c2f1b39899cdd27a5cd",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 44.63636363636363,
"alnum_prop": 0.7072301425661914,
"repo_name": "uwdb/Cosette",
"id": "065ca49b549a41f33608911a95e5797c93e7c8fa",
"size": "1964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Coq",
"bytes": "248069"
},
{
"name": "Dockerfile",
"bytes": "1445"
},
{
"name": "Emacs Lisp",
"bytes": "652"
},
{
"name": "Haskell",
"bytes": "133827"
},
{
"name": "Lean",
"bytes": "671891"
},
{
"name": "Makefile",
"bytes": "1183"
},
{
"name": "Python",
"bytes": "23476"
},
{
"name": "Racket",
"bytes": "409032"
},
{
"name": "Shell",
"bytes": "2757"
}
],
"symlink_target": ""
} |
import urlparse
from urlparse import urlsplit
from coherence.extern.et import parse_xml as et_parse_xml
from coherence import SERVER_ID
from twisted.web import server, http, static
from twisted.web import client, error
from twisted.web import proxy, resource, server
from twisted.internet import reactor,protocol,defer,abstract
from twisted.python import failure
from twisted.python.util import InsensitiveDict
try:
from twisted.protocols._c_urlarg import unquote
except ImportError:
from urllib import unquote
try:
import netifaces
have_netifaces = True
except ImportError:
have_netifaces = False
def means_true(value):
if isinstance(value,basestring):
value = value.lower()
return value in [True,1,'1','true','yes','ok']
def generalise_boolean(value):
""" standardize the different boolean incarnations
transform anything that looks like a "True" into a '1',
and everything else into a '0'
"""
if means_true(value):
return '1'
return '0'
generalize_boolean = generalise_boolean
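# Illustrative behaviour (added for clarity, not part of the original module):
#   generalise_boolean('yes') -> '1'
#   generalise_boolean(True)  -> '1'
#   generalise_boolean('off') -> '0'   # anything not recognised as "true" becomes '0'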
def parse_xml(data, encoding="utf-8"):
return et_parse_xml(data,encoding)
def parse_http_response(data):
""" don't try to get the body, there are reponses without """
header = data.split('\r\n\r\n')[0]
lines = header.split('\r\n')
cmd = lines[0].split(' ')
lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
lines = filter(lambda x: len(x) > 0, lines)
headers = [x.split(':', 1) for x in lines]
headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
return cmd, headers
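# Example (illustrative only): parsing a minimal response header block.
#   cmd, headers = parse_http_response("HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 0\r\nEXT:\r\n\r\n")
#   cmd     -> ['HTTP/1.1', '200', 'OK']
#   headers -> {'content-length': '0', 'ext': ''}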
def get_ip_address(ifname):
"""
determine the IP address by interface name
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/439094
(c) Paul Cannon
Uses the Linux SIOCGIFADDR ioctl to find the IP address associated
with a network interface, given the name of that interface, e.g. "eth0".
The address is returned as a string containing a dotted quad.
Updated to work on BSD. OpenBSD and OSX share the same value for
SIOCGIFADDR, and it's likely that other BSDs do too.
Updated to work on Windows,
using the optional Python module netifaces
http://alastairs-place.net/netifaces/
Thx Lawrence for that patch!
"""
if have_netifaces:
if ifname in netifaces.interfaces():
iface = netifaces.ifaddresses(ifname)
ifaceadr = iface[netifaces.AF_INET]
# we now have a list of address dictionaries, there may be multiple addresses bound
return ifaceadr[0]['addr']
import sys
if sys.platform in ('win32','sunos5'):
return '127.0.0.1'
from os import uname
import socket
import fcntl
import struct
system_type = uname()[0]
if system_type == "Linux":
SIOCGIFADDR = 0x8915
else:
SIOCGIFADDR = 0xc0206921
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname[:15])
)[20:24])
except:
return '127.0.0.1'
def get_host_address():
""" try to get determine the interface used for
the default route, as this is most likely
the interface we should bind to (on a single homed host!)
"""
import sys
if sys.platform == 'win32':
if have_netifaces:
interfaces = netifaces.interfaces()
if len(interfaces):
return get_ip_address(interfaces[0]) # on windows assume first interface is primary
else:
try:
route_file = '/proc/net/route'
route = open(route_file)
if(route):
tmp = route.readline() #skip first line
while (tmp != ''):
tmp = route.readline()
l = tmp.split('\t')
if (len(l) > 2):
if l[1] == '00000000': #default route...
route.close()
return get_ip_address(l[0])
except IOError, msg:
""" fallback to parsing the output of netstat """
from twisted.internet import utils
def result(r):
from os import uname
(osname,_, _, _,_) = uname()
osname = osname.lower()
lines = r.split('\n')
for l in lines:
l = l.strip(' \r\n')
parts = [x.strip() for x in l.split(' ') if len(x) > 0]
if parts[0] in ('0.0.0.0','default'):
if osname[:6] == 'darwin':
return get_ip_address(parts[5])
else:
return get_ip_address(parts[-1])
return '127.0.0.1'
def fail(f):
return '127.0.0.1'
d = utils.getProcessOutput('netstat', ['-rn'])
d.addCallback(result)
d.addErrback(fail)
return d
except Exception, msg:
import traceback
traceback.print_exc()
""" return localhost if we haven't found anything """
return '127.0.0.1'
def de_chunk_payload(response):
try:
import cStringIO as StringIO
except ImportError:
import StringIO
""" This method takes a chunked HTTP data object and unchunks it."""
newresponse = StringIO.StringIO()
# chunked encoding consists of a bunch of lines with
# a length in hex followed by a data chunk and a CRLF pair.
response = StringIO.StringIO(response)
def read_chunk_length():
line = response.readline()
try:
len = int(line.strip(),16)
except ValueError:
len = 0
return len
len = read_chunk_length()
while (len > 0):
newresponse.write(response.read(len))
line = response.readline() # after chunk and before next chunk length
len = read_chunk_length()
return newresponse.getvalue()
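# Example (illustrative only): two chunks, "Wiki" (0x4 bytes) and "pedia" (0x5 bytes),
# followed by the terminating zero-length chunk.
#   de_chunk_payload("4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n") -> "Wikipedia"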
class Request(server.Request):
def process(self):
"Process a request."
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader('server', SERVER_ID)
self.setHeader('date', http.datetimeToString())
self.setHeader('content-type', "text/html")
# Resource Identification
self.prepath = []
self.postpath = map(unquote, self.path[1:].split('/'))
try:
def deferred_rendering(r):
self.render(r)
resrc = self.site.getResourceFor(self)
if isinstance(resrc, defer.Deferred):
resrc.addCallback(deferred_rendering)
resrc.addErrback(self.processingFailed)
else:
self.render(resrc)
except:
self.processingFailed(failure.Failure())
class Site(server.Site):
noisy = False
requestFactory = Request
def startFactory(self):
pass
#http._logDateTimeStart()
class ProxyClient(http.HTTPClient):
"""Used by ProxyClientFactory to implement a simple web proxy."""
def __init__(self, command, rest, version, headers, data, father):
self.father = father
self.command = command
self.rest = rest
if headers.has_key("proxy-connection"):
del headers["proxy-connection"]
#headers["connection"] = "close"
self.headers = headers
#print "command", command
#print "rest", rest
#print "headers", headers
self.data = data
self.send_data = 0
def connectionMade(self):
self.sendCommand(self.command, self.rest)
for header, value in self.headers.items():
self.sendHeader(header, value)
self.endHeaders()
self.transport.write(self.data)
def handleStatus(self, version, code, message):
if message:
# Add a whitespace to message, this allows empty messages
# transparently
message = " %s" % (message,)
if version == 'ICY':
version = 'HTTP/1.1'
#print "ProxyClient handleStatus", version, code, message
self.father.transport.write("%s %s %s\r\n" % (version, code, message))
def handleHeader(self, key, value):
#print "ProxyClient handleHeader", key, value
if not key.startswith('icy-'):
#print "ProxyClient handleHeader", key, value
self.father.transport.write("%s: %s\r\n" % (key, value))
def handleEndHeaders(self):
#self.father.transport.write("%s: %s\r\n" % ( 'Keep-Alive', ''))
#self.father.transport.write("%s: %s\r\n" % ( 'Accept-Ranges', 'bytes'))
#self.father.transport.write("%s: %s\r\n" % ( 'Content-Length', '2000000'))
#self.father.transport.write("%s: %s\r\n" % ( 'Date', 'Mon, 26 Nov 2007 11:04:12 GMT'))
#self.father.transport.write("%s: %s\r\n" % ( 'Last-Modified', 'Sun, 25 Nov 2007 23:19:51 GMT'))
##self.father.transport.write("%s: %s\r\n" % ( 'Server', 'Apache/2.0.52 (Red Hat)'))
self.father.transport.write("\r\n")
def handleResponsePart(self, buffer):
#print "ProxyClient handleResponsePart", len(buffer), self.father.chunked
self.send_data += len(buffer)
self.father.write(buffer)
def handleResponseEnd(self):
#print "handleResponseEnd", self.send_data
self.transport.loseConnection()
self.father.channel.transport.loseConnection()
class ProxyClientFactory(protocol.ClientFactory):
"""
Used by ProxyRequest to implement a simple web proxy.
"""
protocol = proxy.ProxyClient
def __init__(self, command, rest, version, headers, data, father):
self.father = father
self.command = command
self.rest = rest
self.headers = headers
self.data = data
self.version = version
def buildProtocol(self, addr):
return self.protocol(self.command, self.rest, self.version,
self.headers, self.data, self.father)
def clientConnectionFailed(self, connector, reason):
self.father.transport.write("HTTP/1.0 501 Gateway error\r\n")
self.father.transport.write("Content-Type: text/html\r\n")
self.father.transport.write("\r\n")
self.father.transport.write('''<H1>Could not connect</H1>''')
self.father.transport.loseConnection()
class ReverseProxyResource(proxy.ReverseProxyResource):
"""
Resource that renders the results gotten from another server
Put this resource in the tree to cause everything below it to be relayed
to a different server.
@ivar proxyClientFactoryClass: a proxy client factory class, used to create
new connections.
@type proxyClientFactoryClass: L{ClientFactory}
@ivar reactor: the reactor used to create connections.
@type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
"""
proxyClientFactoryClass = ProxyClientFactory
def __init__(self, host, port, path, reactor=reactor):
"""
@param host: the host of the web server to proxy.
@type host: C{str}
@param port: the port of the web server to proxy.
@type port: C{port}
@param path: the base path to fetch data from. Note that you shouldn't
put any trailing slashes in it, it will be added automatically in
request. For example, if you put B{/foo}, a request on B{/bar} will
be proxied to B{/foo/bar}.
@type path: C{str}
"""
resource.Resource.__init__(self)
self.host = host
self.port = port
self.path = path
self.qs = ''
self.reactor = reactor
def getChild(self, path, request):
return ReverseProxyResource(
self.host, self.port, self.path + '/' + path)
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
"""
# RFC 2616 tells us that we can omit the port if it's the default port,
# but we have to provide it otherwise
if self.port == 80:
request.received_headers['host'] = self.host
else:
request.received_headers['host'] = "%s:%d" % (self.host, self.port)
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs == '':
qs = self.qs
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return server.NOT_DONE_YET
def resetTarget(self,host,port,path,qs=''):
self.host = host
self.port = port
self.path = path
self.qs = qs
class ReverseProxyUriResource(ReverseProxyResource):
uri = None
def __init__(self, uri, reactor=reactor):
self.uri = uri
_,host_port,path,params,_ = urlsplit(uri)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
port = int(port)
else:
host = host_port
port = 80
if path =='':
path = '/'
if params == '':
rest = path
else:
rest = '?'.join((path, params))
ReverseProxyResource.__init__(self, host, port, rest, reactor)
def resetUri(self, uri):
self.uri = uri
_,host_port,path,params,_ = urlsplit(uri)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
port = int(port)
else:
host = host_port
port = 80
self.resetTarget(host, port, path, params)
class myHTTPPageGetter(client.HTTPPageGetter):
followRedirect = True
def connectionMade(self):
method = getattr(self, 'method', 'GET')
#print "myHTTPPageGetter", method, self.factory.path
self.sendCommand(method, self.factory.path)
self.sendHeader('Host', self.factory.headers.get("host", self.factory.host))
self.sendHeader('User-Agent', self.factory.agent)
if self.factory.cookies:
l=[]
for cookie, cookval in self.factory.cookies.items():
l.append('%s=%s' % (cookie, cookval))
self.sendHeader('Cookie', '; '.join(l))
data = getattr(self.factory, 'postdata', None)
if data is not None:
self.sendHeader("Content-Length", str(len(data)))
for (key, value) in self.factory.headers.items():
if key.lower() != "content-length":
# we calculated it on our own
self.sendHeader(key, value)
self.endHeaders()
self.headers = {}
if data is not None:
self.transport.write(data)
def handleResponse(self, response):
if self.quietLoss:
return
if self.failed:
self.factory.noPage(
failure.Failure(
error.Error(
self.status, self.message, response)))
elif self.factory.method != 'HEAD' and self.length != None and self.length != 0:
self.factory.noPage(failure.Failure(
client.PartialDownloadError(self.status, self.message, response)))
else:
if(self.headers.has_key('transfer-encoding') and
self.headers['transfer-encoding'][0].lower() == 'chunked'):
self.factory.page(de_chunk_payload(response))
else:
self.factory.page(response)
# server might be stupid and not close connection. admittedly
# the fact we do only one request per connection is also
# stupid...
self.quietLoss = 1
self.transport.loseConnection()
class HeaderAwareHTTPClientFactory(client.HTTPClientFactory):
protocol = myHTTPPageGetter
noisy = False
def __init__(self, url, method='GET', postdata=None, headers=None,
agent="Twisted PageGetter", timeout=0, cookies=None,
followRedirect=True, redirectLimit=20):
self.followRedirect = followRedirect
self.redirectLimit = redirectLimit
self._redirectCount = 0
self.timeout = timeout
self.agent = agent
if cookies is None:
cookies = {}
self.cookies = cookies
if headers is not None:
self.headers = InsensitiveDict(headers)
else:
self.headers = InsensitiveDict()
if postdata is not None:
self.headers.setdefault('Content-Length', len(postdata))
# just in case a broken http/1.1 decides to keep connection alive
self.headers.setdefault("connection", "close")
self.postdata = postdata
self.method = method
self.setURL(url)
self.waiting = 1
self.deferred = defer.Deferred()
self.response_headers = None
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.method = self.method
p.followRedirect = self.followRedirect
if self.timeout:
timeoutCall = reactor.callLater(self.timeout, p.timeout)
self.deferred.addBoth(self._cancelTimeout, timeoutCall)
return p
def page(self, page):
if self.waiting:
self.waiting = 0
self.deferred.callback((page, self.response_headers))
class HeaderAwareHTTPDownloader(client.HTTPDownloader):
def gotHeaders(self, headers):
self.value = headers
if self.requestedPartial:
contentRange = headers.get("content-range", None)
if not contentRange:
# server doesn't support partial requests, oh well
self.requestedPartial = 0
return
start, end, realLength = http.parseContentRange(contentRange[0])
if start != self.requestedPartial:
# server is acting weirdly
self.requestedPartial = 0
def getPage(url, contextFactory=None, *args, **kwargs):
"""Download a web page as a string.
Download a page. Return a deferred, which will callback with a
page (as a string) or errback with a description of the error.
See HTTPClientFactory to see what extra args can be passed.
"""
scheme, host, port, path = client._parse(url)
factory = HeaderAwareHTTPClientFactory(url, *args, **kwargs)
if scheme == 'https':
from twisted.internet import ssl
if contextFactory is None:
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, factory, contextFactory)
else:
reactor.connectTCP(host, port, factory)
return factory.deferred
def downloadPage(url, file, contextFactory=None, *args, **kwargs):
"""Download a web page to a file.
@param file: path to file on filesystem, or file-like object.
See HTTPDownloader to see what extra args can be passed.
"""
scheme, host, port, path = client._parse(url)
factory = HeaderAwareHTTPDownloader(url, file, *args, **kwargs)
factory.noisy = False
if scheme == 'https':
from twisted.internet import ssl
if contextFactory is None:
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, factory, contextFactory)
else:
reactor.connectTCP(host, port, factory)
return factory.deferred
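# Minimal usage sketch (illustrative only; the URL is a placeholder, not a real endpoint):
#
#   def on_page(result):
#       page, headers = result     # this getPage variant also returns the response headers
#       print len(page), headers.get('content-type')
#
#   d = getPage('http://127.0.0.1:8080/description.xml')
#   d.addCallback(on_page)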
class StaticFile(static.File):
""" taken from twisted.web.static and modified
accordingly to the patch by John-Mark Gurney
http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch
"""
def render(self, request):
#print ""
#print "StaticFile", request
#print "StaticFile in", request.received_headers
"""You know what you doing."""
self.restat()
if self.type is None:
self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
if not self.exists():
return self.childNotFound.render(request)
if self.isdir():
return self.redirect(request)
#for content-length
fsize = size = self.getFileSize()
request.setHeader('accept-ranges','bytes')
if self.type:
request.setHeader('content-type', self.type)
if self.encoding:
request.setHeader('content-encoding', self.encoding)
try:
f = self.openForReading()
except IOError, e:
import errno
if e[0] == errno.EACCES:
return error.ForbiddenResource().render(request)
else:
raise
if request.setLastModified(self.getmtime()) is http.CACHED:
return ''
trans = True
range = request.getHeader('range')
#print "StaticFile", range
tsize = size
if range is not None:
# This is a request for partial data...
bytesrange = range.split('=')
assert bytesrange[0] == 'bytes',\
"Syntactically invalid http range header!"
start, end = bytesrange[1].split('-', 1)
if start:
f.seek(int(start))
if end:
end = int(end)
else:
end = size - 1
else:
lastbytes = int(end)
if size < lastbytes:
lastbytes = size
start = size - lastbytes
f.seek(start)
fsize = lastbytes
end = size - 1
size = end + 1
fsize = end - int(start) + 1
# start is the byte offset to begin, and end is the byte offset
# to end.. fsize is size to send, tsize is the real size of
# the file, and size is the byte position to stop sending.
if fsize <= 0:
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
fsize = tsize
trans = False
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader('content-range',"bytes %s-%s/%s " % (
str(start), str(end), str(tsize)))
#print "StaticFile", start, end, tsize
request.setHeader('content-length', str(fsize))
if request.method == 'HEAD' or trans == False:
# pretend we're a HEAD request, so content-length
# won't be overwritten.
#print "HEAD request"
request.method = 'HEAD'
return ''
#print "StaticFile out", request.headers, request.code
# return data
# size is the byte position to stop sending, not how many bytes to send
static.FileTransfer(f, size, request)
# and make sure the connection doesn't get closed
return server.NOT_DONE_YET
class BufferFile(static.File):
""" taken from twisted.web.static and modified
accordingly to the patch by John-Mark Gurney
http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch
"""
def __init__(self, path, target_size=0, *args):
static.File.__init__(self, path, *args)
self.target_size = target_size
self.upnp_retry = None
def render(self, request):
#print ""
#print "BufferFile", request
# FIXME detect when request is REALLY finished
if request is None or request.finished :
print "No request to render!"
return ''
"""You know what you doing."""
self.restat()
if self.type is None:
self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
if not self.exists():
return self.childNotFound.render(request)
if self.isdir():
return self.redirect(request)
#for content-length
if (self.target_size > 0):
fsize = size = int(self.target_size)
else:
fsize = size = int(self.getFileSize())
#print fsize
if size == int(self.getFileSize()):
request.setHeader('accept-ranges','bytes')
if self.type:
request.setHeader('content-type', self.type)
if self.encoding:
request.setHeader('content-encoding', self.encoding)
try:
f = self.openForReading()
except IOError, e:
import errno
if e[0] == errno.EACCES:
return error.ForbiddenResource().render(request)
else:
raise
if request.setLastModified(self.getmtime()) is http.CACHED:
return ''
trans = True
range = request.getHeader('range')
#print "StaticFile", range
tsize = size
if range is not None:
# This is a request for partial data...
bytesrange = range.split('=')
assert bytesrange[0] == 'bytes',\
"Syntactically invalid http range header!"
start, end = bytesrange[1].split('-', 1)
if start:
start = int(start)
# Are we requesting something beyond the current size of the file?
if (start >= self.getFileSize()):
# Retry later!
print bytesrange
print "Requesting data beyond current scope -> postpone rendering!"
self.upnp_retry = reactor.callLater(1.0, self.render, request)
return server.NOT_DONE_YET
f.seek(start)
if end:
#print ":%s" % end
end = int(end)
else:
end = size - 1
else:
lastbytes = int(end)
if size < lastbytes:
lastbytes = size
start = size - lastbytes
f.seek(start)
fsize = lastbytes
end = size - 1
size = end + 1
fsize = end - int(start) + 1
# start is the byte offset to begin, and end is the byte offset
# to end.. fsize is size to send, tsize is the real size of
# the file, and size is the byte position to stop sending.
if fsize <= 0:
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
fsize = tsize
trans = False
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader('content-range',"bytes %s-%s/%s " % (
str(start), str(end), str(tsize)))
#print "StaticFile", start, end, tsize
request.setHeader('content-length', str(fsize))
if request.method == 'HEAD' or trans == False:
# pretend we're a HEAD request, so content-length
# won't be overwritten.
request.method = 'HEAD'
return ''
#print "StaticFile out", request.headers, request.code
# return data
# size is the byte position to stop sending, not how many bytes to send
BufferFileTransfer(f, size - f.tell(), request)
# and make sure the connection doesn't get closed
return server.NOT_DONE_YET
class BufferFileTransfer(object):
"""
A class to represent the transfer of a file over the network.
"""
request = None
def __init__(self, file, size, request):
self.file = file
self.size = size
self.request = request
self.written = self.file.tell()
request.registerProducer(self, 0)
def resumeProducing(self):
#print "resumeProducing", self.request,self.size,self.written
if not self.request:
return
data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written))
if data:
self.written += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.file.tell() == self.size:
self.request.unregisterProducer()
self.request.finish()
self.request = None
def pauseProducing(self):
pass
def stopProducing(self):
#print "stopProducing",self.request
self.request.unregisterProducer()
self.file.close()
self.request.finish()
self.request = None
from datetime import datetime, tzinfo, timedelta
import random
class CET(tzinfo):
def __init__(self):
self.__offset = timedelta(minutes=60)
self.__name = 'CET'
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self,dt):
return timedelta(0)
class CEST(tzinfo):
def __init__(self):
self.__offset = timedelta(minutes=120)
self.__name = 'CEST'
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self,dt):
return timedelta(0)
bdates = [ datetime(1997,2,28,17,20,tzinfo=CET()), # Sebastian Oliver
datetime(1999,9,19,4,12,tzinfo=CEST()), # Patrick Niklas
datetime(2000,9,23,4,8,tzinfo=CEST()), # Saskia Alexa
datetime(2003,7,23,1,18,tzinfo=CEST()), # Mara Sophie
# you are the best!
]
def datefaker():
return random.choice(bdates)
| {
"content_hash": "fc815043b49786d2b36f33519f00f547",
"timestamp": "",
"source": "github",
"line_count": 916,
"max_line_length": 104,
"avg_line_length": 33.2467248908297,
"alnum_prop": 0.570302751691075,
"repo_name": "palfrey/coherence",
"id": "7df68746ed00f8b9692fa7d6b0b40e6f166b2fa6",
"size": "30647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coherence/upnp/core/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1240247"
},
{
"name": "Shell",
"bytes": "1566"
}
],
"symlink_target": ""
} |
from .ast import *
class NodeFactory(object):
"""
Encapsulates abstract syntax tree node creation.
"""
def is_valid_left_hand_side(self, expression):
return isinstance(expression, Node) and expression.is_valid_left_hand_side()
#
# Statements
#
def create_block(self, statements):
return Block(statements)
def create_do_while_statement(self, condition, body):
return DoWhileStatement(condition, body)
def create_while_statement(self, condition, body):
return WhileStatement(condition, body)
def create_for_statement(self, initializer, condition, next, body):
return ForStatement(initializer, condition, next, body)
def create_for_in_statement(self, each, enumerable, body):
return ForInStatement(each, enumerable, body)
def create_expression_statement(self, expression):
return ExpressionStatement(expression)
def create_labelled_statement(self, label, statement):
return LabelledStatement(label, statement)
def create_continue_statement(self, target):
return ContinueStatement(target)
def create_break_statement(self, target):
return BreakStatement(target)
def create_return_statement(self, expression):
return ReturnStatement(expression)
def create_case_clause(self, label, statements):
return CaseClause(label, statements)
def create_switch_statement(self, expression, cases):
return SwitchStatement(expression, cases)
def create_if_statement(self, condition, then_statement, else_statement):
return IfStatement(condition, then_statement, else_statement)
def create_try_statement(self, try_block, catch_var, catch_block, finally_block):
return TryStatement(try_block, catch_var, catch_block, finally_block)
def create_with_statement(self, expression, statement):
return WithStatement(expression, statement)
def create_variable_declaration(self, name, value):
return VariableDeclaration(name, value)
def create_variable_statement(self, declarations):
return VariableStatement(declarations)
def create_empty_statement(self):
return EmptyStatement()
#
# Expressions
#
def create_null_node(self):
return NullNode()
def create_true_node(self):
return TrueNode()
def create_false_node(self):
return FalseNode()
def create_this_node(self):
return ThisNode()
def create_name(self, value):
return Name(value)
def create_string_literal(self, value):
return StringLiteral(value)
def create_number_literal(self, value):
return NumberLiteral(value)
def create_object_literal(self, properties):
return ObjectLiteral(properties)
def create_object_property(self, name, value):
return ObjectProperty(name, value)
def create_property_name(self, value):
return PropertyName(value)
def create_property_getter(self, name, body):
return PropertyGetter(name, body)
def create_property_setter(self, name, parameter, body):
return PropertySetter(name, parameter, body)
def create_regexp_literal(self, pattern, flags):
return RegExpLiteral(pattern, flags)
def create_array_literal(self, elements):
return ArrayLiteral(elements)
def create_elision(self):
return Elision()
def create_dot_property(self, object, key):
return DotProperty(object, key)
def create_bracket_property(self, object, key):
return BracketProperty(object, key)
def create_call_expression(self, expression, arguments):
return CallExpression(expression, arguments)
def create_new_expression(self, expression, arguments):
return NewExpression(expression, arguments)
def create_unary_operation(self, op, expression):
return UnaryOperation(op, expression)
def create_typeof_operation(self, expression):
return TypeofOperation(expression)
def create_delete_operation(self, expression):
return DeleteOperation(expression)
def create_void_operation(self, expression):
return VoidOperation(expression)
def create_prefix_count_operation(self, op, expression):
return PrefixCountOperation(op, expression)
def create_postfix_count_operation(self, op, expression):
return PostfixCountOperation(op, expression)
def create_binary_operation(self, op, left, right):
return BinaryOperation(op, left, right)
def create_compare_operation(self, op, left, right):
return CompareOperation(op, left, right)
def create_conditional(self, condition, then_expression, else_expression):
return Conditional(condition, then_expression, else_expression)
def create_assignment(self, op, target, value):
return Assignment(op, target, value)
def create_throw(self, exception):
return Throw(exception)
def create_function_declaration(self, name, parameters, body):
return FunctionDeclaration(name, parameters, body)
def create_function_expression(self, name, parameters, body):
return FunctionExpression(name, parameters, body)
def create_parameters(self, parameters):
return parameters
def create_source_elements(self, statements):
return SourceElements(statements)
def create_program(self, statements):
return Program(statements)
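# Minimal usage sketch (illustrative only; the node classes come from .ast and the
# '+' operator token representation is an assumption):
#
#   factory = NodeFactory()
#   expr = factory.create_binary_operation(
#       '+', factory.create_number_literal(1), factory.create_number_literal(2))
#   program = factory.create_program(
#       [factory.create_expression_statement(expr)])   # AST for the program "1 + 2;"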
| {
"content_hash": "adf38b79f11f02b22db45562d1971472",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 85,
"avg_line_length": 30.90449438202247,
"alnum_prop": 0.6953281221596074,
"repo_name": "jeffkistler/BigRig",
"id": "cbfb91344146274d3b024f3007e36f5051effef4",
"size": "5501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigrig/parser/factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "364776"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use `airflow.providers.http.operators.http`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.http.operators.http import SimpleHttpOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.http.operators.http`.",
DeprecationWarning,
stacklevel=2,
)
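# Migration example (illustrative only):
#   old: from airflow.operators.http_operator import SimpleHttpOperator
#   new: from airflow.providers.http.operators.http import SimpleHttpOperator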
| {
"content_hash": "281bcba2ff05e4a4973c57e063ea73f9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.7625698324022346,
"repo_name": "airbnb/airflow",
"id": "34e938144f009937a408a3038aec2445330e89e4",
"size": "1145",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/operators/http_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
import json
import os
import sys
from pyjavaproperties import Properties
def main():
if len(sys.argv) == 1:
sys.exit('Usage: {0} <file.properties>'.format(sys.argv[0]))
if not os.path.exists(sys.argv[1]):
sys.exit('Error: {0} does not exist !'.format(sys.argv[1]))
p = Properties()
p.load(open(sys.argv[1]))
hex_sizes = p['hex.sizes'].split(';')
hex_sizes = [hs.split(',')[0] for hs in hex_sizes]
mappings = p['index.mapping'].split('/')[1]
properties = {}
fields = p['fields']
for field in fields.split(';'):
tokens = field.split(',')
if tokens[0] == 'geo':
name = tokens[1]
properties[name] = {"type": "geo_point"}
properties[name + "_xm"] = {"type": "float", "index": "no"}
properties[name + "_ym"] = {"type": "float", "index": "no"}
for hs in hex_sizes:
properties[name + "_" + hs] = {"type": "string", "index": "not_analyzed"}
elif tokens[0] == 'grid':
name = tokens[1]
properties[name] = {"type": "geo_point"}
properties[name + "_xm"] = {"type": "float", "index": "no"}
properties[name + "_ym"] = {"type": "float", "index": "no"}
properties[name + "_g"] = {"type": "string", "index": "not_analyzed"}
elif tokens[0] == 'int':
properties[tokens[1]] = {"type": "integer"}
elif tokens[0] == 'long':
properties[tokens[1]] = {"type": "long"}
elif tokens[0] == 'float':
properties[tokens[1]] = {"type": "float"}
elif tokens[0] == 'double':
properties[tokens[1]] = {"type": "float"}
elif tokens[0] == 'date' or tokens[0] == 'date-time':
name = tokens[1]
date_format = tokens[3] if len(tokens) == 4 else "YYYY-MM-dd HH:mm:ss"
properties[name] = {"type": "date", "format": date_format}
properties[name + "_yy"] = {"type": "integer"}
properties[name + "_mm"] = {"type": "integer"}
properties[name + "_dd"] = {"type": "integer"}
properties[name + "_hh"] = {"type": "integer"}
properties[name + "_dow"] = {"type": "integer"}
elif tokens[0] == 'date-iso':
name = tokens[1]
date_format = tokens[3] if len(tokens) == 4 else "date_optional_time"
properties[name] = {"type": "date", "format": date_format}
properties[name + "_yy"] = {"type": "integer"}
properties[name + "_mm"] = {"type": "integer"}
properties[name + "_dd"] = {"type": "integer"}
properties[name + "_hh"] = {"type": "integer"}
properties[name + "_dow"] = {"type": "integer"}
elif tokens[0] == 'date-only':
name = tokens[1]
date_format = tokens[3] if len(tokens) == 4 else "YYYY-MM-dd HH:mm:ss"
properties[name] = {"type": "date", "format": date_format}
else:
name = tokens[1]
properties[name] = {"type": "string", "index": "not_analyzed"}
doc = {
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"refresh_interval": "-1",
"auto_expand_replicas": "false"
},
"mappings": {mappings: {
"_all": {
"enabled": False
},
"_source": {
"enabled": True
},
"properties": properties
}}
}
basename, extension = os.path.splitext(sys.argv[1])
with open(basename + ".json", "wb") as fw:
fw.write(json.dumps(doc, ensure_ascii=False, indent=2))
base = os.path.basename(sys.argv[1])
name = os.path.splitext(base)[0]
print("PUT Example: curl -XPUT localhost:9200/{0}?pretty -d @{1}.json".format(name.lower(), basename))
if __name__ == '__main__':
main()
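# Example input (illustrative only) showing the properties keys this script expects:
#
#   hex.sizes=100,0.001;1000,0.01
#   index.mapping=myindex/mytype
#   fields=geo,location;date,event_time,0,YYYY-MM-dd HH:mm:ss;int,count;string,name
#
# Running "python gen-mapping.py example.properties" would then write example.json with a
# "mytype" mapping whose properties include location (geo_point) plus the location_100 and
# location_1000 hex fields, event_time with _yy/_mm/_dd/_hh/_dow helpers, count and name.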
| {
"content_hash": "d63bd61bbd2afe5586cc594ec75d7af6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 106,
"avg_line_length": 38.28431372549019,
"alnum_prop": 0.4937259923175416,
"repo_name": "mraad/spark-csv-es",
"id": "ba6942c3973a08f990ea53720dc9fb9a3b702d42",
"size": "3962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/gen-mapping.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "700"
},
{
"name": "Python",
"bytes": "19457"
},
{
"name": "Scala",
"bytes": "41864"
},
{
"name": "Shell",
"bytes": "2826"
}
],
"symlink_target": ""
} |
"""
Listen to realtime robot data
Vers:20140307
Input:
listen (in) [Generic Data] - True to listen to robot
datatype (in, optional) [Generic Data] - Data to listen to: 0 = "tool_pose", 1 = "actual_joints_pos", anything else returns all data
id (in) [Generic Data] - Robot ID: 1/2/3
Returns:
out [Text] - The execution information, as output and error streams
a [Generic Data] - Script variable Python
"""
import comm
from Grasshopper.Kernel import GH_RuntimeMessageLevel as gh_msg
error_inputs = []
if listen is None: error_inputs.append('listen')
if not id: error_inputs.append('id')
if not error_inputs:
ip = '192.168.10.%d'%(10 * int(id) + 3)
if listen:
if datatype == 0:
a = comm.listen(ip)['tool_pose']
elif datatype == 1:
a = comm.listen(ip)['actual_joints_pos']
else:
a = ["{0} {1}".format(k,v) for k,v in comm.listen(ip).iteritems()]
else:
error_message = 'Failed to collect data for {0} required input(s): {1}'.format(len(error_inputs), ','.join(error_inputs))
ghenv.Component.AddRuntimeMessage(gh_msg.Warning, error_message) | {
"content_hash": "c95a140a589acbaa9913e8122c84d0b6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 125,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6357913669064749,
"repo_name": "tclim/your",
"id": "faeefddd5152d7398794ff0b08661b884bff9330",
"size": "1112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "your_components/YourListener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "6942"
},
{
"name": "CSS",
"bytes": "16345"
},
{
"name": "HTML",
"bytes": "326814"
},
{
"name": "JavaScript",
"bytes": "10874"
},
{
"name": "Python",
"bytes": "66924"
}
],
"symlink_target": ""
} |
import pickle
import unittest
from geopy.location import Location
from geopy.point import Point
GRAND_CENTRAL_STR = "89 E 42nd St New York, NY 10017"
GRAND_CENTRAL_COORDS_STR = "40.752662,-73.9773"
GRAND_CENTRAL_COORDS_TUPLE = (40.752662, -73.9773, 0)
GRAND_CENTRAL_POINT = Point(GRAND_CENTRAL_COORDS_STR)
GRAND_CENTRAL_RAW = {
'id': '1',
'class': 'place',
'lat': '40.752662',
'lon': '-73.9773',
'display_name':
"89, East 42nd Street, New York, "
"New York, 10017, United States of America",
}
class LocationTestCase(unittest.TestCase):
def _location_iter_test(
self,
loc,
ref_address=GRAND_CENTRAL_STR,
ref_latitude=GRAND_CENTRAL_COORDS_TUPLE[0],
ref_longitude=GRAND_CENTRAL_COORDS_TUPLE[1]
):
address, (latitude, longitude) = loc
self.assertEqual(address, ref_address)
self.assertEqual(latitude, ref_latitude)
self.assertEqual(longitude, ref_longitude)
def _location_properties_test(self, loc, raw=None):
self.assertEqual(loc.address, GRAND_CENTRAL_STR)
self.assertEqual(loc.latitude, GRAND_CENTRAL_COORDS_TUPLE[0])
self.assertEqual(loc.longitude, GRAND_CENTRAL_COORDS_TUPLE[1])
self.assertEqual(loc.altitude, GRAND_CENTRAL_COORDS_TUPLE[2])
if raw is not None:
self.assertEqual(loc.raw, raw)
def test_location_str(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_STR, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_point(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_none(self):
with self.assertRaises(TypeError):
Location(GRAND_CENTRAL_STR, None, {})
def test_location_iter(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_point_typeerror(self):
with self.assertRaises(TypeError):
Location(GRAND_CENTRAL_STR, 1, {})
def test_location_array_access(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self.assertEqual(loc[0], GRAND_CENTRAL_STR)
self.assertEqual(loc[1][0], GRAND_CENTRAL_COORDS_TUPLE[0])
self.assertEqual(loc[1][1], GRAND_CENTRAL_COORDS_TUPLE[1])
def test_location_properties(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self._location_properties_test(loc)
def test_location_raw(self):
loc = Location(
GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, raw=GRAND_CENTRAL_RAW
)
self._location_properties_test(loc, GRAND_CENTRAL_RAW)
def test_location_string(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self.assertEqual(str(loc), loc.address)
def test_location_len(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self.assertEqual(len(loc), 2)
def test_location_eq(self):
loc1 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
loc2 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self.assertEqual(loc1, loc2)
def test_location_ne(self):
loc1 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
loc2 = Location(GRAND_CENTRAL_STR, Point(0, 0), {})
self.assertNotEqual(loc1, loc2)
def test_location_repr(self):
address = (
"22, Ksi\u0119dza Paw\u0142a Po\u015bpiecha, "
"Centrum Po\u0142udnie, Zabrze, wojew\xf3dztwo "
"\u015bl\u0105skie, 41-800, Polska"
)
point = (0.0, 0.0, 0.0)
loc = Location(address, point, {})
self.assertEqual(
repr(loc),
"Location(%s, %r)" % (address, point)
)
def test_location_is_picklable(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
# https://docs.python.org/2/library/pickle.html#data-stream-format
for protocol in (0, 1, 2, -1):
pickled = pickle.dumps(loc, protocol=protocol)
loc_unp = pickle.loads(pickled)
self.assertEqual(loc, loc_unp)
def test_location_with_unpicklable_raw(self):
some_class = type('some_class', (object,), {})
raw_unpicklable = dict(missing=some_class())
del some_class
loc_unpicklable = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT,
raw_unpicklable)
for protocol in (0, 1, 2, -1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickle.dumps(loc_unpicklable, protocol=protocol)
| {
"content_hash": "d85d200541d7d699e3090ef84363c010",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 75,
"avg_line_length": 37.13636363636363,
"alnum_prop": 0.6264789881680947,
"repo_name": "geopy/geopy",
"id": "bd2b2c927ca5ac75e78dba2e038d16c634297145",
"size": "4902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "Python",
"bytes": "565428"
}
],
"symlink_target": ""
} |
import sys
import RPi.GPIO as io, time, os
io.setmode(io.BCM)
pin_number = 17
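# How RCtime works (a rough sketch of the RC-timing approach used below): the pin
# is first driven LOW as an output to discharge the capacitor, then switched to an
# input, and the loop counts iterations until the pin reads HIGH. A larger count
# means the capacitor charged more slowly (higher resistance / less light); the
# count is unitless and depends on the board and CPU speed.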
def RCtime (RCpin):
reading = 0
io.setup(RCpin, io.OUT)
io.output(RCpin, io.LOW)
time.sleep(0.1)
io.setup(RCpin, io.IN)
while (io.input(RCpin) == io.LOW):
reading += 1
return reading
while True:
sys.stdout.write(str(RCtime(pin_number)))
    sys.stdout.flush()
| {
"content_hash": "71d36a4b2319a28599ae6029121182b0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 49,
"avg_line_length": 20.285714285714285,
"alnum_prop": 0.5610328638497653,
"repo_name": "kelvien/japri",
"id": "0397e67ad0c73c1e313203513b24f777adae6b5e",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/PhotoResistor/PhotoResistor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "300763"
},
{
"name": "CoffeeScript",
"bytes": "1254"
},
{
"name": "HTML",
"bytes": "50979"
},
{
"name": "JavaScript",
"bytes": "3859199"
},
{
"name": "Python",
"bytes": "9754"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import tests.testapp.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtailmedia.blocks
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0059_apply_collection_ordering"),
("wagtailmedia", "0004_duration_optional_floatfield"),
("wagtailmedia_tests", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="BlogStreamPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
("author", models.CharField(max_length=255)),
("date", models.DateField(verbose_name="Post date")),
(
"body",
wagtail.core.fields.StreamField(
[
(
"heading",
wagtail.core.blocks.CharBlock(
form_classname="title", icon="title"
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(icon="pilcrow"),
),
(
"media",
tests.testapp.models.TestMediaBlock(icon="media"),
),
(
"video",
wagtailmedia.blocks.VideoChooserBlock(icon="media"),
),
(
"audio",
wagtailmedia.blocks.AudioChooserBlock(icon="media"),
),
]
),
),
(
"featured_media",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="wagtailmedia.media",
),
),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
]
| {
"content_hash": "2f83e040a534b2b5c63bfebc4e8e87d8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 84,
"avg_line_length": 35.55844155844156,
"alnum_prop": 0.36303871439006574,
"repo_name": "torchbox/wagtailmedia",
"id": "48999d957998c54e54808f5b142588e7659d70af",
"size": "2787",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/testapp/migrations/0002_blogstreampage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "993"
},
{
"name": "HTML",
"bytes": "43344"
},
{
"name": "JavaScript",
"bytes": "20328"
},
{
"name": "Makefile",
"bytes": "972"
},
{
"name": "Python",
"bytes": "206542"
}
],
"symlink_target": ""
} |
import signal
import sys
import ssl
import logging
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
from optparse import OptionParser
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
class SimpleEcho(WebSocket):
def handleMessage(self):
if self.data is None:
self.data = ''
try:
self.sendMessage(str(self.data))
except Exception as n:
print n
def handleConnected(self):
print self.address, 'connected'
def handleClose(self):
print self.address, 'closed'
class SimpleChat(WebSocket):
def handleMessage(self):
if self.data is None:
self.data = ''
for client in self.server.connections.itervalues():
if client != self:
try:
client.sendMessage(str(self.address[0]) + ' - ' + str(self.data))
except Exception as n:
print n
def handleConnected(self):
print self.address, 'connected'
for client in self.server.connections.itervalues():
if client != self:
try:
client.sendMessage(str(self.address[0]) + ' - connected')
except Exception as n:
print n
def handleClose(self):
print self.address, 'closed'
for client in self.server.connections.itervalues():
if client != self:
try:
client.sendMessage(str(self.address[0]) + ' - disconnected')
except Exception as n:
print n
if __name__ == "__main__":
parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)")
parser.add_option("--port", default=8000, type='int', action="store", dest="port", help="port (8000)")
parser.add_option("--example", default='echo', type='string', action="store", dest="example", help="echo, chat")
parser.add_option("--ssl", default=0, type='int', action="store", dest="ssl", help="ssl (1: on, 0: off (default))")
parser.add_option("--cert", default='./cert.pem', type='string', action="store", dest="cert", help="cert (./cert.pem)")
parser.add_option("--ver", default=ssl.PROTOCOL_TLSv1, type=int, action="store", dest="ver", help="ssl version")
(options, args) = parser.parse_args()
cls = SimpleEcho
if options.example == 'chat':
cls = SimpleChat
if options.ssl == 1:
server = SimpleSSLWebSocketServer(options.host, options.port, cls, options.cert, options.cert, version=options.ver)
else:
server = SimpleWebSocketServer(options.host, options.port, cls)
def close_sig_handler(signal, frame):
server.close()
sys.exit()
signal.signal(signal.SIGINT, close_sig_handler)
server.serveforever()
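    # Example invocations (a sketch based on the options defined above):
    #   python SimpleExampleServer.py --example echo --port 8000
    #   python SimpleExampleServer.py --example chat --port 8000 --ssl 1 --cert ./cert.pem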
| {
"content_hash": "f2c6737748edc8775e6f1c58024ec30d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 123,
"avg_line_length": 33.96590909090909,
"alnum_prop": 0.6022080963532954,
"repo_name": "CursosWeb/X-Nav-5.7.16-WebSocket-Chat",
"id": "5eebe6cddc36aa33646c66699e6a574f7e016e6e",
"size": "3140",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SimpleExampleServer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "22213"
}
],
"symlink_target": ""
} |
from itertools import chain
import requests
from nalaf.structures.data import Document, Part
from nalaf.utils.cache import Cacheable
from xml.etree import ElementTree as ET
class DownloadArticle(Cacheable):
"""
A utility generator that for a given iterable of PMIDs generates Document objects
created by downloading the articles associated with the pmid.
"""
def __init__(self, one_part=False):
super().__init__()
self.one_part = one_part
"""whether to put everything (title, abstract, etc.) under the same part joined with new line"""
self.pubmed_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
self.is_timed = False
def download(self, pmids):
for pmid in pmids:
if pmid in self.cache:
xml = ET.fromstring(self.cache[pmid])
else:
req = requests.get(self.pubmed_url, {'db': 'pubmed', 'retmode': 'xml', 'id': pmid})
text = req.text
xml = ET.fromstring(text)
self.cache[pmid] = text
doc = Document()
if self.one_part:
joined_text = '\n'.join(element.text for element in
chain(xml.findall('.//ArticleTitle'), xml.findall('.//AbstractText')))
doc.parts['title_and_abstract'] = Part(joined_text)
else:
# for now only include title and abstract
title_elem = xml.find('.//ArticleTitle')
if title_elem is not None:
doc.parts['title'] = Part(title_elem.text)
abstract_elem = xml.findall('.//AbstractText')
if abstract_elem is not None:
abstract_elems = []
for elem in abstract_elem:
if 'Label' in elem.attrib and elem.attrib['Label'] != 'UNLABELLED':
abstract_elems.append('{}: {}'.format(elem.attrib['Label'], elem.text))
else:
abstract_elems.append(elem.text)
abstract_elems = filter(None, abstract_elems)
doc.parts['abstract'] = Part(' '.join(abstract_elems))
# yield the document but only if you found anything
if len(doc.parts) > 0:
yield pmid, doc
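# A minimal usage sketch (the PMID below is illustrative, not from this module):
#
#     downloader = DownloadArticle(one_part=True)
#     for pmid, doc in downloader.download(["12345678"]):
#         print(pmid, list(doc.parts))   # e.g. ['title_and_abstract']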
| {
"content_hash": "e3ea4720186c63303fcf377644894ee6",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 110,
"avg_line_length": 39.7,
"alnum_prop": 0.5428211586901763,
"repo_name": "Rostlab/nalaf",
"id": "e312bd31c00447890b417be34f81ff3292dd3aec",
"size": "2382",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nalaf/utils/download.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "467923"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9988")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
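# walletpassphrase(<passphrase>, 60) asks the daemon to keep the wallet unlocked
# for roughly 60 seconds, so follow-up RPC calls on the same ServiceProxy can
# sign and send transactions during that window.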
| {
"content_hash": "39fe9ee12d6e8692577746d3a7d49c9d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.5,
"alnum_prop": 0.759493670886076,
"repo_name": "execoin/execoin",
"id": "3135b196a6ce4919ef457f480855b428ca316b8a",
"size": "158",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "91804"
},
{
"name": "C++",
"bytes": "2544132"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69709"
},
{
"name": "Shell",
"bytes": "9813"
},
{
"name": "TypeScript",
"bytes": "5232065"
}
],
"symlink_target": ""
} |
import sys
DEBUG = False
class Logger:
def __init__(self, output=True):
self.indent = 0
self.output = output
self.lines = []
def quite(self):
self.output = False
def loud(self):
self.output = True
def write(self, text):
text = ' '*self.indent + text
if self.output:
sys.stdout.write(text)
self.lines.append(text)
logger = Logger(DEBUG)
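# A small usage sketch (illustrative; the module-level ``logger`` is defined above):
#
#     logger.loud()
#     logger.indent = 4
#     logger.write('entering rule\n')   # printed with a 4-space indent and kept in logger.lines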
# vim: et sw=4 sts=4
| {
"content_hash": "97b64cbe8bd64933835c4d3c6e08cc94",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 37,
"avg_line_length": 18.24,
"alnum_prop": 0.5504385964912281,
"repo_name": "jaredly/codetalker",
"id": "3ce4304864e426993e2370bb9d32ded0ad1d2965",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codetalker/pgm/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83304"
},
{
"name": "JavaScript",
"bytes": "8881"
},
{
"name": "Python",
"bytes": "109186"
},
{
"name": "Shell",
"bytes": "790"
}
],
"symlink_target": ""
} |
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
from __future__ import annotations
import collections
import functools
import itertools
import json
import math
import os
import random
import re
import warnings
from abc import ABCMeta, abstractmethod
from fnmatch import fnmatch
from io import StringIO
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Sequence,
Set,
Union,
)
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from ruamel.yaml import YAML
from tabulate import tabulate
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice, get_points_in_spheres
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.core.units import Length, Mass
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
from pymatgen.util.coord import all_distances, get_angle, lattice_points_in_supercell
from pymatgen.util.typing import ArrayLike, CompositionLike, SpeciesLike
class Neighbor(Site):
"""
Simple Site subclass to contain a neighboring atom that skips all the unnecessary checks for speed. Can be
used as a fixed-length tuple of size 3 to retain backwards compatibility with past use cases.
(site, nn_distance, index).
In future, usage should be to call attributes, e.g., Neighbor.index, Neighbor.distance, etc.
"""
def __init__(
self,
species: Composition,
coords: np.ndarray,
properties: dict = None,
nn_distance: float = 0.0,
index: int = 0,
):
"""
:param species: Same as Site
:param coords: Same as Site, but must be fractional.
:param properties: Same as Site
:param nn_distance: Distance to some other Site.
:param index: Index within structure.
"""
self.coords = coords
self._species = species
self.properties = properties or {}
self.nn_distance = nn_distance
self.index = index
def __len__(self) -> Literal[3]:
"""
Make neighbor Tuple-like to retain backwards compatibility.
"""
return 3
def __getitem__(self, idx: int):
"""Make neighbor Tuple-like to retain backwards compatibility."""
return (self, self.nn_distance, self.index)[idx]
class PeriodicNeighbor(PeriodicSite):
"""
Simple PeriodicSite subclass to contain a neighboring atom that skips all
the unnecessary checks for speed. Can be used as a fixed-length tuple of
size 4 to retain backwards compatibility with past use cases.
(site, distance, index, image).
In future, usage should be to call attributes, e.g., PeriodicNeighbor.index,
PeriodicNeighbor.distance, etc.
"""
def __init__(
self,
species: Composition,
coords: np.ndarray,
lattice: Lattice,
properties: dict = None,
nn_distance: float = 0.0,
index: int = 0,
image: tuple = (0, 0, 0),
):
"""
Args:
species (Composition): Same as PeriodicSite
coords (np.ndarray): Same as PeriodicSite, but must be fractional.
lattice (Lattice): Same as PeriodicSite
properties (dict, optional): Same as PeriodicSite. Defaults to None.
nn_distance (float, optional): Distance to some other Site.. Defaults to 0.0.
index (int, optional): Index within structure.. Defaults to 0.
image (tuple, optional): PeriodicImage. Defaults to (0, 0, 0).
"""
self._lattice = lattice
self._frac_coords = coords
self._species = species
self.properties = properties or {}
self.nn_distance = nn_distance
self.index = index
self.image = image
@property # type: ignore
def coords(self) -> np.ndarray: # type: ignore
"""
:return: Cartesian coords.
"""
return self._lattice.get_cartesian_coords(self._frac_coords)
def __len__(self):
"""
Make neighbor Tuple-like to retain backwards compatibility.
"""
return 4
def __getitem__(self, i: int):
"""
Make neighbor Tuple-like to retain backwards compatibility.
"""
return (self, self.nn_distance, self.index, self.image)[i]
class SiteCollection(collections.abc.Sequence, metaclass=ABCMeta):
"""
Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
This serves as a base class for Molecule (a collection of Site, i.e., no
periodicity) and Structure (a collection of PeriodicSites, i.e.,
periodicity). Not meant to be instantiated directly.
"""
# Tolerance in Angstrom for determining if sites are too close.
DISTANCE_TOLERANCE = 0.5
@property
@abstractmethod
def sites(self) -> tuple[Site, ...]:
"""
Returns a tuple of sites.
"""
@abstractmethod
def get_distance(self, i: int, j: int) -> float:
"""
Returns distance between sites at index i and j.
Args:
i: Index of first site
j: Index of second site
Returns:
Distance between sites at index i and index j.
"""
@property
def distance_matrix(self) -> np.ndarray:
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this is overwritten to return the nearest image
distance.
"""
return all_distances(self.cart_coords, self.cart_coords)
@property
def species(self) -> list[Element | Species]:
"""
Only works for ordered structures.
Disordered structures will raise an AttributeError.
Returns:
([Species]) List of species at each site of the structure.
"""
return [site.specie for site in self]
@property
def species_and_occu(self) -> list[Composition]:
"""
List of species and occupancies at each site of the structure.
"""
return [site.species for site in self]
@property
def ntypesp(self) -> int:
"""Number of types of atoms."""
return len(self.types_of_species)
@property
def types_of_species(self) -> tuple[Element | Species | DummySpecies]:
"""
List of types of specie.
"""
# Cannot use set since we want a deterministic algorithm.
types = [] # type: List[Union[Element, Species, DummySpecies]]
for site in self:
for sp, v in site.species.items():
if v != 0:
types.append(sp)
return tuple(sorted(set(types))) # type: ignore
@property
def types_of_specie(self) -> tuple[Element | Species | DummySpecies]:
"""
Specie->Species rename. Maintained for backwards compatibility.
"""
return self.types_of_species
def group_by_types(self) -> Iterator[Site | PeriodicSite]:
"""Iterate over species grouped by type"""
for t in self.types_of_species:
for site in self:
if site.specie == t:
yield site
def indices_from_symbol(self, symbol: str) -> tuple[int, ...]:
"""
Returns a tuple with the sequential indices of the sites
that contain an element with the given chemical symbol.
"""
return tuple((i for i, specie in enumerate(self.species) if specie.symbol == symbol))
@property
def symbol_set(self) -> tuple[str, ...]:
"""
Tuple with the set of chemical symbols.
Note that len(symbol_set) == len(types_of_specie)
"""
return tuple(sorted(specie.symbol for specie in self.types_of_species))
@property
def atomic_numbers(self) -> tuple[int, ...]:
"""List of atomic numbers."""
try:
return tuple(site.specie.Z for site in self)
except AttributeError:
raise AttributeError("atomic_numbers available only for ordered Structures")
@property
def site_properties(self) -> dict[str, list]:
"""
Returns the site properties as a dict of sequences. E.g.,
{"magmom": (5,-5), "charge": (-4,4)}.
"""
props = {} # type: Dict[str, List]
prop_keys = set() # type: Set[str]
for site in self:
prop_keys.update(site.properties.keys())
for k in prop_keys:
props[k] = [site.properties.get(k, None) for site in self]
return props
def __contains__(self, site: object) -> bool:
return site in self.sites
def __iter__(self) -> Iterator[Site]:
return self.sites.__iter__()
def __getitem__(self, ind):
return self.sites[ind]
def __len__(self) -> int:
return len(self.sites)
def __hash__(self) -> int:
# for now, just use the composition hash code.
return self.composition.__hash__()
@property
def num_sites(self) -> int:
"""
Number of sites.
"""
return len(self)
@property
def cart_coords(self) -> np.ndarray:
"""
Returns an np.array of the Cartesian coordinates of sites in the
structure.
"""
return np.array([site.coords for site in self])
@property
def formula(self) -> str:
"""
(str) Returns the formula.
"""
return self.composition.formula
@property
def composition(self) -> Composition:
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float) # type: Dict[Species, float]
for site in self:
for species, occu in site.species.items():
elmap[species] += occu
return Composition(elmap)
@property
def charge(self) -> float:
"""
Returns the net charge of the structure based on oxidation states. If
Elements are found, a charge of 0 is assumed.
"""
charge = 0
for site in self:
for specie, amt in site.species.items():
charge += getattr(specie, "oxi_state", 0) * amt
return charge
@property
def is_ordered(self) -> bool:
"""
Checks if structure is ordered, meaning no partial occupancies in any
of the sites.
"""
return all(site.is_ordered for site in self)
def get_angle(self, i: int, j: int, k: int) -> float:
"""
Returns angle specified by three sites.
Args:
i: Index of first site.
j: Index of second site.
k: Index of third site.
Returns:
Angle in degrees.
"""
v1 = self[i].coords - self[j].coords
v2 = self[k].coords - self[j].coords
return get_angle(v1, v2, units="degrees")
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
"""
Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees.
"""
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
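        # The signed dihedral is obtained with the atan2 formulation
        # atan2(|v2| * v1 . (v2 x v3), (v1 x v2) . (v2 x v3)), which remains
        # well-conditioned near 0 and 180 degrees, unlike an acos-based form.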
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23), np.dot(v12, v23)))
def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
"""
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Default is 0.5A.
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
"""
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
@abstractmethod
def to(self, fmt: str = None, filename: str = None):
"""
Generates well-known string representations of SiteCollections (e.g.,
molecules / structures). Should return a string type or write to a file.
"""
@classmethod
@abstractmethod
def from_str(cls, input_string: str, fmt: Any):
"""
Reads in SiteCollection from a string.
"""
@classmethod
@abstractmethod
def from_file(cls, filename: str):
"""
Reads in SiteCollection from a filename.
"""
def add_site_property(self, property_name: str, values: list):
"""
        Adds a property to each site. Note: This is the preferred method
for adding magnetic moments, selective dynamics, and related
site-specific properties to a structure/molecule object.
Examples:
structure.add_site_property("magmom", [1.0, 0.0])
structure.add_site_property("selective_dynamics", [[True, True, True], [False, False, False]])
Args:
property_name (str): The name of the property to add.
values (list): A sequence of values. Must be same length as
number of sites.
"""
if len(values) != len(self.sites):
raise ValueError("Values must be same length as sites.")
for site, val in zip(self.sites, values):
site.properties[property_name] = val
def remove_site_property(self, property_name: str):
"""
        Removes a property from all sites.
Args:
property_name (str): The name of the property to remove.
"""
for site in self.sites:
del site.properties[property_name]
def replace_species(self, species_mapping: dict[SpeciesLike, SpeciesLike]) -> None:
"""
Swap species. Note that this method modifies the structure in place.
Args:
species_mapping (dict): dict of species to swap. Species can be
elements too. E.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si'): {Element('Ge'):0.75,
Element('C'):0.25} } will have .375 Ge and .125 C.
"""
sp_mapping = {get_el_sp(k): v for k, v in species_mapping.items()}
sp_to_replace = set(sp_mapping.keys())
sp_in_structure = set(self.composition.keys())
if not sp_in_structure.issuperset(sp_to_replace):
            warnings.warn(
                "Some species to be substituted are not present in structure. Please check your input. Species to be "
                f"substituted = {sp_to_replace}; Species in structure = {sp_in_structure}"
            )
for site in self.sites:
if sp_to_replace.intersection(site.species):
c = Composition()
for sp, amt in site.species.items():
new_sp = sp_mapping.get(sp, sp)
try:
c += Composition(new_sp) * amt
except Exception:
c += {new_sp: amt}
site.species = c
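    # A short sketch of the two mapping styles accepted above (illustrative):
    #
    #     structure.replace_species({"Li": "Na"})                      # full substitution
    #     structure.replace_species({"Si": {"Ge": 0.75, "C": 0.25}})   # partial / disordered occupancies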
def add_oxidation_state_by_element(self, oxidation_states: dict[str, float]):
"""
Add oxidation states.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for site in self.sites:
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Species(sym, oxidation_states[sym])] = occu
site.species = Composition(new_sp)
except KeyError:
raise ValueError("Oxidation state of all elements must be specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states: list[float]):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
if len(oxidation_states) != len(self.sites):
raise ValueError("Oxidation states of all sites must be specified.")
for site, ox in zip(self.sites, oxidation_states):
new_sp = {}
for el, occu in site.species.items():
sym = el.symbol
new_sp[Species(sym, ox)] = occu
site.species = Composition(new_sp)
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for el, occu in site.species.items():
sym = el.symbol
new_sp[Element(sym)] += occu
site.species = Composition(new_sp)
def add_oxidation_state_by_guess(self, **kwargs):
"""
Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses()
"""
oxid_guess = self.composition.oxi_state_guesses(**kwargs)
oxid_guess = oxid_guess or [{e.symbol: 0 for e in self.composition}]
self.add_oxidation_state_by_element(oxid_guess[0])
def add_spin_by_element(self, spins: dict[str, float]):
"""
Add spin states to a structure.
Args:
spins (dict): Dict of spins associated with elements or species,
e.g. {"Ni":+5} or {"Ni2+":5}
"""
for site in self.sites:
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[
Species(
sym,
oxidation_state=oxi_state,
properties={"spin": spins.get(str(sp), spins.get(sym, None))},
)
] = occu
site.species = Composition(new_sp)
def add_spin_by_site(self, spins: list[float]):
"""
Add spin states to a structure by site.
Args:
spins (list): List of spins
E.g., [+5, -5, 0, 0]
"""
if len(spins) != len(self.sites):
            raise ValueError("Spins for all sites must be specified.")
for site, spin in zip(self.sites, spins):
new_sp = {}
for sp, occu in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Species(sym, oxidation_state=oxi_state, properties={"spin": spin})] = occu
site.species = Composition(new_sp)
def remove_spin(self):
"""
Removes spin states from a structure.
"""
for site in self.sites:
new_sp = collections.defaultdict(float)
for sp, occu in site.species.items():
oxi_state = getattr(sp, "oxi_state", None)
new_sp[Species(sp.symbol, oxidation_state=oxi_state)] += occu
site.species = new_sp
def extract_cluster(self, target_sites: list[Site], **kwargs):
"""
Extracts a cluster of atoms based on bond lengths
Args:
target_sites ([Site]): List of initial sites to nucleate cluster.
**kwargs: kwargs passed through to CovalentBond.is_bonded.
Returns:
[Site/PeriodicSite] Cluster of atoms.
"""
cluster = list(target_sites)
others = [site for site in self if site not in cluster]
size = 0
while len(cluster) > size:
size = len(cluster)
new_others = []
for site in others:
for site2 in cluster:
if CovalentBond.is_bonded(site, site2, **kwargs):
cluster.append(site)
break
else:
new_others.append(site)
others = new_others
return cluster
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(
self,
lattice: ArrayLike | Lattice,
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None,
) -> None:
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in Cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(
"The list of atomic species must be of the same length as the list of fractional coordinates."
)
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i, sp in enumerate(species):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(
PeriodicSite(
sp,
coords[i],
self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop,
)
)
self._sites: tuple[PeriodicSite, ...] = tuple(sites)
if validate_proximity and not self.is_valid():
            raise StructureError("Structure contains sites that are less than 0.01 Angstrom apart!")
self._charge = charge
@classmethod
def from_sites(
cls,
sites: list[PeriodicSite],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
) -> IStructure | Structure:
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
charge: Charge of structure.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError(f"You need at least one site to construct a {cls}")
prop_keys = [] # type: List[str]
props = {}
lattice = sites[0].lattice
for i, site in enumerate(sites):
if site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any(vv is None for vv in v):
warnings.warn(f"Not all sites have property {k}. Missing values are set to None.")
return cls(
lattice,
[site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
)
@classmethod
def from_spacegroup(
cls,
sg: str,
lattice: list | np.ndarray | Lattice,
species: Sequence[str | Element | Species | DummySpecies | Composition],
coords: Sequence[Sequence[float]],
site_properties: dict[str, Sequence] = None,
coords_are_cartesian: bool = False,
tol: float = 1e-5,
) -> IStructure | Structure:
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in Cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
spg = SpaceGroup.from_int_number(i)
except ValueError:
spg = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not spg.is_compatible(latt):
raise ValueError(
f"Supplied lattice with parameters {latt.parameters} is incompatible with supplied spacegroup "
f"{spg.symbol}!"
)
if len(species) != len(coords):
raise ValueError(f"Supplied species and coords lengths ({len(species)} vs {len(coords)}) are different!")
frac_coords = (
np.array(coords, dtype=np.float_) if not coords_are_cartesian else latt.get_fractional_coords(coords)
)
props = {} if site_properties is None else site_properties
all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]
all_coords = [] # type: List[List[float]]
all_site_properties = collections.defaultdict(list) # type: Dict[str, List]
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = spg.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc) # type: ignore
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords, site_properties=all_site_properties)
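    # A usage sketch (lattice constant and species are illustrative): rock salt from
    # its two symmetrically distinct sites, using the Lattice class imported above,
    #
    #     s = Structure.from_spacegroup(
    #         "Fm-3m", Lattice.cubic(4.2), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]
    #     )
    #
    # the spacegroup operations expand each distinct site to its full orbit.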
@classmethod
def from_magnetic_spacegroup(
cls,
msg: str | MagneticSpaceGroup,
lattice: list | np.ndarray | Lattice,
species: Sequence[str | Element | Species | DummySpecies | Composition],
coords: Sequence[Sequence[float]],
site_properties: dict[str, Sequence],
coords_are_cartesian: bool = False,
tol: float = 1e-5,
) -> IStructure | Structure:
"""
Generate a structure using a magnetic spacegroup. Note that only
        symmetrically distinct species, coords and magmoms should be provided.
All equivalent sites are generated from the spacegroup operations.
Args:
msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):
The magnetic spacegroup.
If a string, it will be interpreted as one of the notations
supported by MagneticSymmetryGroup, e.g., "R-3'c" or "Fm'-3'm".
If a list of two ints, it will be interpreted as the number of
the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Unlike Structure.from_spacegroup(),
this argument is mandatory, since magnetic moment information
has to be included. Note that the *direction* of the supplied
magnetic moment relative to the crystal is important, even if
the resulting structure is used for collinear calculations.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in Cartesian coordinates. Defaults to False.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
if "magmom" not in site_properties:
raise ValueError("Magnetic moments have to be defined.")
magmoms = [Magmom(m) for m in site_properties["magmom"]]
if not isinstance(msg, MagneticSpaceGroup):
msg = MagneticSpaceGroup(msg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not msg.is_compatible(latt):
raise ValueError(
f"Supplied lattice with parameters {latt.parameters} is incompatible with supplied spacegroup "
f"{msg.sg_symbol}!"
)
if len(species) != len(coords):
raise ValueError(f"Supplied species and coords lengths ({len(species)} vs {len(coords)}) are different!")
if len(species) != len(magmoms):
raise ValueError(f"Supplied species and magmom lengths ({len(species)} vs {len(magmoms)}) are different!")
frac_coords = coords if not coords_are_cartesian else latt.get_fractional_coords(coords)
all_sp = [] # type: List[Union[str, Element, Species, DummySpecies, Composition]]
all_coords = [] # type: List[List[float]]
all_magmoms = [] # type: List[float]
all_site_properties = collections.defaultdict(list) # type: Dict[str, List]
for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)): # type: ignore
cc, mm = msg.get_orbit(c, m, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
all_magmoms.extend(mm)
for k, v in site_properties.items():
if k != "magmom":
all_site_properties[k].extend([v[i]] * len(cc))
all_site_properties["magmom"] = all_magmoms
return cls(latt, all_sp, all_coords, site_properties=all_site_properties)
@property
def charge(self) -> float:
"""
Overall charge of the structure
"""
if self._charge is None:
return super().charge
return self._charge
@property
def distance_matrix(self) -> np.ndarray:
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords, self.frac_coords)
@property
def sites(self) -> tuple[PeriodicSite, ...]:
"""
        Returns the tuple of sites in the Structure.
"""
return self._sites
@property
def lattice(self) -> Lattice:
"""
Lattice of the structure.
"""
return self._lattice
@property
def density(self) -> float:
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
@property
def pbc(self) -> tuple[bool, bool, bool]:
"""
Returns the periodicity of the structure.
"""
return self._lattice.pbc
@property
def is_3d_periodic(self) -> bool:
"""True if the Lattice is periodic in all directions."""
return self._lattice.is_3d_periodic
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0) -> tuple[str, int]:
"""
Convenience method to quickly get the spacegroup of a structure.
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number
"""
# Import within method needed to avoid cyclic dependency.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
a = SpacegroupAnalyzer(self, symprec=symprec, angle_tolerance=angle_tolerance)
return a.get_space_group_symbol(), a.get_space_group_number()
def matches(self, other, anonymous=False, **kwargs) -> bool:
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
            (bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
if not anonymous:
return m.fit(self, other)
return m.fit_anonymous(self, other)
def __eq__(self, other) -> bool:
if other is self:
return True
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
# For now, just use the composition hash code.
return self.composition.__hash__()
def __mul__(self, scaling_matrix: int | Sequence[int] | Sequence[Sequence[int]]) -> Structure:
"""
        Makes a supercell, allowing sites to fall outside the unit cell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
of the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
Returns:
Supercell structure. Note that a Structure is always returned,
even if the input structure is a subclass of Structure. This is
to avoid different arguments signatures from causing problems. If
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
scale_matrix = np.array(scaling_matrix, int)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), int)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
for site in self:
for v in c_lat:
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
skip_checks=True,
)
new_sites.append(s)
new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None
return Structure.from_sites(new_sites, charge=new_charge, to_unit_cell=True)
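    # Scaling-matrix sketch (illustrative, for an existing Structure ``s``):
    #
    #     s * 3                                    # 3x3x3 supercell, 27x the sites
    #     s * [2, 1, 1]                            # double the a lattice vector only
    #     s * [[1, 1, 0], [0, 1, 0], [0, 0, 1]]    # general integer linear combination
    #
    # each returns a new Structure and leaves the original unchanged.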
def __rmul__(self, scaling_matrix):
"""
Similar to __mul__ to preserve commutativeness.
"""
return self.__mul__(scaling_matrix)
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self) -> float:
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i: int, j: int, jimage=None) -> float:
"""
        Get distance between sites i and j assuming periodic boundary
        conditions. If jimage (the periodic image of atom j) is not specified,
        the image nearest to atom i is selected and the distance to that
        nearest image is returned. If jimage is specified, the distance
        between atom i and that particular image of atom j is returned.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def get_sites_in_sphere(
self,
pt: ArrayLike,
r: float,
include_index: bool = False,
include_image: bool = False,
) -> list[PeriodicNeighbor]:
"""
Find all sites within a sphere from the point, including a site (if any)
sitting on the point itself. This includes sites in other periodic
images.
        Algorithm:
        1. Place a sphere of radius r in the crystal and determine the minimum
           supercell (parallelepiped) which would contain the sphere. For this
           we need the projection of a_1 on a unit vector perpendicular
           to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
           determine how many a_1's it will take to contain the sphere.
           Nxmax = r * length_of_b_1 / (2 Pi)
        2. Keep points falling within r.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
                is included in the returned data
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = [] # type: List[PeriodicNeighbor]
for fcoord, dist, i, img in self._lattice.get_points_in_sphere(site_fcoords, pt, r):
nnsite = PeriodicNeighbor(
self[i].species,
fcoord,
self._lattice,
properties=self[i].properties,
nn_distance=dist,
image=img, # type: ignore
index=i,
)
neighbors.append(nnsite)
return neighbors
def get_neighbors(
self,
site: PeriodicSite,
r: float,
include_index: bool = False,
include_image: bool = False,
) -> list[PeriodicNeighbor]:
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
return self.get_all_neighbors(r, include_index=include_index, include_image=include_image, sites=[site])[0]
@deprecated(get_neighbors, "This is retained purely for checking purposes.")
def get_neighbors_old(self, site, r, include_index=False, include_image=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
            include_image (bool): Whether the supercell image
                is included in the returned data
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
nn = self.get_sites_in_sphere(site.coords, r, include_index=include_index, include_image=include_image)
return [d for d in nn if site != d[0]]
def _get_neighbor_list_py(
self,
r: float,
sites: list[PeriodicSite] = None,
numerical_tol: float = 1e-8,
exclude_self: bool = True,
) -> tuple[np.ndarray, ...]:
"""
A python version of getting neighbor_list. The returned values are a tuple of
numpy arrays (center_indices, points_indices, offset_vectors, distances).
Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is
translated by `offset_vectors[i]` lattice vectors, and the distance is
`distances[i]`.
Args:
r (float): Radius of sphere
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
                with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
exclude_self (bool): whether to exclude atom neighboring with itself within
numerical tolerance distance, default to True
Returns: (center_indices, points_indices, offset_vectors, distances)
"""
neighbors = self.get_all_neighbors_py(
            r=r, include_index=True, include_image=True, sites=sites, numerical_tol=numerical_tol
)
center_indices = []
points_indices = []
offsets = []
distances = []
for i, nns in enumerate(neighbors):
if len(nns) > 0:
for n in nns:
if exclude_self and (i == n.index) and (n.nn_distance <= numerical_tol):
continue
center_indices.append(i)
points_indices.append(n.index)
offsets.append(n.image)
distances.append(n.nn_distance)
return tuple(
(
np.array(center_indices),
np.array(points_indices),
np.array(offsets),
np.array(distances),
)
)
def get_neighbor_list(
self,
r: float,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
exclude_self: bool = True,
) -> tuple[np.ndarray, ...]:
"""
Get neighbor lists using numpy array representations without constructing
Neighbor objects. If the cython extension is installed, this method will
be orders of magnitude faster than `get_all_neighbors_old` and 2-3x faster
than `get_all_neighbors`.
The returned values are a tuple of numpy arrays
(center_indices, points_indices, offset_vectors, distances).
Atom `center_indices[i]` has neighbor atom `points_indices[i]` that is
translated by `offset_vectors[i]` lattice vectors, and the distance is
`distances[i]`.
Args:
r (float): Radius of sphere
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
                with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
exclude_self (bool): whether to exclude atom neighboring with itself within
numerical tolerance distance, default to True
Returns: (center_indices, points_indices, offset_vectors, distances)
"""
try:
from pymatgen.optimization.neighbors import find_points_in_spheres
except ImportError:
            return self._get_neighbor_list_py(r, sites, numerical_tol=numerical_tol, exclude_self=exclude_self)  # type: ignore
else:
if sites is None:
sites = self.sites
site_coords = np.array([site.coords for site in sites], dtype=float)
cart_coords = np.ascontiguousarray(np.array(self.cart_coords), dtype=float)
lattice_matrix = np.ascontiguousarray(np.array(self.lattice.matrix), dtype=float)
r = float(r)
center_indices, points_indices, images, distances = find_points_in_spheres(
cart_coords,
site_coords,
r=r,
pbc=np.array(self.pbc, dtype=int),
lattice=lattice_matrix,
tol=numerical_tol,
)
cond = np.array([True] * len(center_indices))
if exclude_self:
self_pair = (center_indices == points_indices) & (distances <= numerical_tol)
cond = ~self_pair
return tuple(
(
center_indices[cond],
points_indices[cond],
images[cond],
distances[cond],
)
)
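    # Sketch of consuming the flat arrays returned above (names illustrative):
    #
    #     centers, neighbors, images, dists = s.get_neighbor_list(r=3.0)
    #     # bond k connects site centers[k] to site neighbors[k] translated by
    #     # images[k] lattice vectors, at distance dists[k]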
def get_symmetric_neighbor_list(
self,
r: float,
sg: str,
unique: bool = False,
numerical_tol: float = 1e-8,
exclude_self: bool = True,
) -> tuple[np.ndarray, ...]:
"""
Similar to 'get_neighbor_list' with sites=None, but the neighbors are
grouped by symmetry. The returned values are a tuple of numpy arrays
(center_indices, points_indices, offset_vectors, distances,
symmetry_indices). Atom `center_indices[i]` has neighbor atom
`points_indices[i]` that is translated by `offset_vectors[i]` lattice
vectors, and the distance is `distances[i]`. Symmetry_idx groups the bonds
that are related by a symmetry of the provided space group and symmetry_op
is the operation that relates the first bond of the same symmetry_idx to
        the respective bond. The first bond maps onto itself via the Identity. The
        output is sorted w.r.t. symmetry_indices. If unique is True, only one of the
two bonds connecting two points is given. Out of the two, the bond that does not
reverse the sites is chosen.
Args:
r (float): Radius of sphere
sg (str/int): The spacegroup the symmetry operations of which will be
used to classify the neighbors. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
If None, 'get_space_group_info' will be used to determine the
                space group. Defaults to None.
unique (bool): Whether a bond is given for both, or only a single
direction is given. The default is False.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
                with the site. Sites which are r + numerical_tol away are deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
exclude_self (bool): whether to exclude atom neighboring with itself within
numerical tolerance distance, default to True
Returns: (center_indices, points_indices, offset_vectors, distances,
symmetry_indices, symmetry_ops)
"""
from pymatgen.symmetry.groups import SpaceGroup
        if sg is None:
            sgp = SpaceGroup(self.get_space_group_info()[0])
        else:
            try:
                i = int(sg)
                sgp = SpaceGroup.from_int_number(i)
            except ValueError:
                sgp = SpaceGroup(sg)
        ops = sgp.symmetry_ops
latt = self.lattice
if not sgp.is_compatible(latt):
raise ValueError(
f"Supplied lattice with parameters {latt.parameters} is incompatible with "
f"supplied spacegroup {sgp.symbol}!"
)
# get a list of neighbors up to distance r
bonds = self.get_neighbor_list(r)
if unique:
redundant = []
# compare all neighbors pairwise to find the pairs that connect the same
# two sites, but with an inverted vector (R=-R) that connects the two and add
# one of each pair to the redundant list.
for it, (i, j, R, d) in enumerate(zip(*bonds)):
if it in redundant:
pass
else:
for it2, (i2, j2, R2, d2) in enumerate(zip(*bonds)):
bool1 = i == j2
bool2 = j == i2
bool3 = (R == -R2).all()
bool4 = np.isclose(d, d2, atol=numerical_tol)
if bool1 and bool2 and bool3 and bool4:
redundant.append(it2)
# delete the redundant neighbors
m = ~np.in1d(np.arange(len(bonds[0])), redundant)
idcs_dist = np.argsort(bonds[3][m])
bonds = (bonds[0][m][idcs_dist], bonds[1][m][idcs_dist], bonds[2][m][idcs_dist], bonds[3][m][idcs_dist])
# expand the output tuple by symmetry_indices and symmetry_ops.
nbonds = len(bonds[0])
symmetry_indices = np.empty(nbonds)
symmetry_indices[:] = np.NaN
symmetry_ops = np.empty(len(symmetry_indices), dtype=object)
symmetry_identity = SymmOp.from_rotation_and_translation(np.eye(3), np.zeros(3))
symmetry_index = 0
# Again, compare all neighbors pairwise. For each pair of neighbors, all the symmetry operations of the provided
# space group are iterated over. If an operation is found that connects the two bonds, it is assigned the same
        # symmetry index it is compared to, and the symmetry operation that connects the two is saved. To compare two
# neighbors 'SymmOp.are_symmetrically_related_vectors' is used. It is also checked whether applying the
# connecting symmetry operation generates the neighbor-pair itself, or the equivalent version with the
# sites exchanged and R reversed. The output is always reordered such that the former case is true.
for it in range(nbonds):
if np.isnan(symmetry_indices[it]):
symmetry_indices[it] = symmetry_index
symmetry_ops[it] = symmetry_identity
for it2 in np.arange(nbonds)[np.isnan(symmetry_indices)]:
equal_distance = np.isclose(bonds[3][it], bonds[3][it2], atol=numerical_tol)
if equal_distance:
from_a = self[bonds[0][it]].frac_coords
to_a = self[bonds[1][it]].frac_coords
r_a = bonds[2][it]
from_b = self[bonds[0][it2]].frac_coords
to_b = self[bonds[1][it2]].frac_coords
r_b = bonds[2][it2]
for op in ops:
are_related, is_reversed = op.are_symmetrically_related_vectors(
from_a, to_a, r_a, from_b, to_b, r_b
)
if are_related and not is_reversed:
symmetry_indices[it2] = symmetry_index
symmetry_ops[it2] = op
elif are_related and is_reversed:
symmetry_indices[it2] = symmetry_index
symmetry_ops[it2] = op
bonds[0][it2], bonds[1][it2] = bonds[1][it2], bonds[0][it2]
bonds[2][it2] = -bonds[2][it2]
symmetry_index += 1
# the bonds are ordered by their symmetry index
idcs_symid = np.argsort(symmetry_indices)
bonds = (
bonds[0][idcs_symid],
bonds[1][idcs_symid],
bonds[2][idcs_symid],
bonds[3][idcs_symid],
)
symmetry_indices = symmetry_indices[idcs_symid]
symmetry_ops = symmetry_ops[idcs_symid]
# the groups of neighbors with the same symmetry index are ordered such that neighbors
        # that are the first occurrence of a new symmetry index in the ordered output are the ones
# that are assigned the Identity as a symmetry operation.
idcs_symop = np.arange(nbonds)
identity_idcs = np.where(symmetry_ops == symmetry_identity)[0]
for symmetry_idx in np.unique(symmetry_indices):
first_idx = np.argmax(symmetry_indices == symmetry_idx)
for second_idx in identity_idcs:
if symmetry_indices[second_idx] == symmetry_idx:
idcs_symop[first_idx], idcs_symop[second_idx] = idcs_symop[second_idx], idcs_symop[first_idx]
return (
bonds[0][idcs_symop],
bonds[1][idcs_symop],
bonds[2][idcs_symop],
bonds[3][idcs_symop],
symmetry_indices[idcs_symop],
symmetry_ops[idcs_symop],
)
def get_all_neighbors(
self,
r: float,
include_index: bool = False,
include_image: bool = False,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
) -> list[list[PeriodicNeighbor]]:
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell.
        However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
        The returned neighbors are PeriodicNeighbor objects, which carry the
        distance (nn_distance), since most of the time, subsequent processing
        requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
        site does not correspond to how much it has been translated from its
        current position, but to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
with the site. Sites which are r + numerical_tol away is deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
Returns:
[[:class:`pymatgen.core.structure.PeriodicNeighbor`], ..]
"""
if sites is None:
sites = self.sites
center_indices, points_indices, images, distances = self.get_neighbor_list(
r=r, sites=sites, numerical_tol=numerical_tol
)
if len(points_indices) < 1:
return [[]] * len(sites)
f_coords = self.frac_coords[points_indices] + images
neighbor_dict: dict[int, list] = collections.defaultdict(list)
lattice = self.lattice
atol = Site.position_atol
all_sites = self.sites
for cindex, pindex, image, f_coord, d in zip(center_indices, points_indices, images, f_coords, distances):
psite = all_sites[pindex]
csite = sites[cindex]
if (
d > numerical_tol
or
# This simply compares the psite and csite. The reason why manual comparison is done is
# for speed. This does not check the lattice since they are always equal. Also, the or construct
# returns True immediately once one of the conditions are satisfied.
psite.species != csite.species
or (not np.allclose(psite.coords, csite.coords, atol=atol))
or (not psite.properties == csite.properties)
):
neighbor_dict[cindex].append(
PeriodicNeighbor(
species=psite.species,
coords=f_coord,
lattice=lattice,
properties=psite.properties,
nn_distance=d,
index=pindex,
image=tuple(image),
)
)
neighbors: list[list[PeriodicNeighbor]] = []
for i in range(len(sites)):
neighbors.append(neighbor_dict[i])
return neighbors
def get_all_neighbors_py(
self,
r: float,
include_index: bool = False,
include_image: bool = False,
sites: Sequence[PeriodicSite] = None,
numerical_tol: float = 1e-8,
) -> list[list[PeriodicNeighbor]]:
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell.
        However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
        The returned neighbors are PeriodicNeighbor objects, which carry the
        distance (nn_distance), since most of the time, subsequent processing
        requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
        site does not correspond to how much it has been translated from its
        current position, but to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
sites (list of Sites or None): sites for getting all neighbors,
default is None, which means neighbors will be obtained for all
sites. This is useful in the situation where you are interested
only in one subspecies type, and makes it a lot faster.
numerical_tol (float): This is a numerical tolerance for distances.
Sites which are < numerical_tol are determined to be coincident
with the site. Sites which are r + numerical_tol away is deemed
to be within r from the site. The default of 1e-8 should be
ok in most instances.
Returns:
[[:class:`pymatgen.core.structure.PeriodicNeighbor`],...]
"""
if sites is None:
sites = self.sites
site_coords = np.array([site.coords for site in sites])
point_neighbors = get_points_in_spheres(
self.cart_coords,
site_coords,
r=r,
pbc=self.pbc,
numerical_tol=numerical_tol,
lattice=self.lattice,
)
neighbors: list[list[PeriodicNeighbor]] = []
for point_neighbor, site in zip(point_neighbors, sites):
nns: list[PeriodicNeighbor] = []
if len(point_neighbor) < 1:
neighbors.append([])
continue
for n in point_neighbor:
coord, d, index, image = n
if (d > numerical_tol) or (self[index] != site):
neighbor = PeriodicNeighbor(
species=self[index].species,
coords=coord,
lattice=self.lattice,
properties=self[index].properties,
nn_distance=d,
index=index,
image=tuple(image),
)
nns.append(neighbor)
neighbors.append(nns)
return neighbors
@deprecated(get_all_neighbors, "This is retained purely for checking purposes.")
def get_all_neighbors_old(self, r, include_index=False, include_image=False, include_site=True):
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell.
        However, if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
A note about periodic images: Before computing the neighbors, this
operation translates all atoms to within the unit cell (having
fractional coordinates within [0,1)). This means that the "image" of a
        site does not correspond to how much it has been translated from its
        current position, but to which image of the unit cell it resides in.
Args:
r (float): Radius of sphere.
include_index (bool): Whether to include the non-supercell site
in the returned data
include_image (bool): Whether to include the supercell image
in the returned data
include_site (bool): Whether to include the site in the returned
data. Defaults to True.
Returns:
[:class:`pymatgen.core.structure.PeriodicNeighbor`]
"""
# Use same algorithm as get_sites_in_sphere to determine supercell but
# loop over all atoms in crystal
recp_len = np.array(self.lattice.reciprocal_lattice.abc)
maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
latt = self._lattice
matrix = latt.matrix
neighbors = [[] for _ in range(len(self._sites))]
all_fcoords = np.mod(self.frac_coords, 1)
coords_in_cell = np.dot(all_fcoords, matrix)
site_coords = self.cart_coords
indices = np.arange(len(self))
for image in itertools.product(*all_ranges):
coords = np.dot(image, matrix) + coords_in_cell
all_dists = all_distances(coords, site_coords)
all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
for (j, d, within_r) in zip(indices, all_dists, all_within_r):
if include_site:
nnsite = PeriodicSite(
self[j].species,
coords[j],
latt,
properties=self[j].properties,
coords_are_cartesian=True,
skip_checks=True,
)
for i in indices[within_r]:
item = []
if include_site:
item.append(nnsite)
item.append(d[i])
if include_index:
item.append(j)
# Add the image, if requested
if include_image:
item.append(image)
neighbors[i].append(item)
return neighbors
def get_neighbors_in_shell(
self, origin: ArrayLike, r: float, dr: float, include_index: bool = False, include_image: bool = False
) -> list[PeriodicNeighbor]:
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Deprecated. Now, the non-supercell site index
is always included in the returned data.
include_image (bool): Deprecated. Now the supercell image
is always included in the returned data.
Returns:
[NearestNeighbor] where Nearest Neighbor is a named tuple containing
(site, distance, index, image).
"""
outer = self.get_sites_in_sphere(origin, r + dr, include_index=include_index, include_image=include_image)
inner = r - dr
return [t for t in outer if t.nn_distance > inner]
def get_sorted_structure(self, key: Callable | None = None, reverse: bool = False) -> IStructure | Structure:
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
return type(self).from_sites(sites, charge=self._charge)
def get_reduced_structure(self, reduction_algo: Literal["niggli", "LLL"] = "niggli") -> IStructure | Structure:
"""
Get a reduced structure.
Args:
reduction_algo ("niggli" | "LLL"): The lattice reduction algorithm to use.
Defaults to "niggli".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError(f"Invalid reduction algo : {reduction_algo}")
if reduced_latt != self.lattice:
return self.__class__(
reduced_latt,
self.species_and_occu,
self.cart_coords, # type: ignore
coords_are_cartesian=True,
to_unit_cell=True,
site_properties=self.site_properties,
charge=self._charge,
)
return self.copy()
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
                sorted by electronegativity, (ii) an LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(
self._lattice,
self.species_and_occu,
self.frac_coords,
charge=self._charge,
site_properties=props,
)
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p, v in props.items():
site_props[p] = v[i]
new_sites.append(
PeriodicSite(
site.species,
frac_coords,
reduced_latt,
to_unit_cell=True,
properties=site_props,
skip_checks=True,
)
)
new_sites = sorted(new_sites)
return type(self).from_sites(new_sites, charge=self._charge)
def interpolate(
self,
end_structure: IStructure | Structure,
nimages: int | Iterable = 10,
interpolate_lattices: bool = False,
pbc: bool = True,
autosort_tol: float = 0,
) -> list[IStructure | Structure]:
"""
Interpolate between this structure and end_structure. Useful for
construction of NEB inputs.
Args:
end_structure (Structure): structure to interpolate between this
structure and end.
nimages (int,list): No. of interpolation images or a list of
interpolation images. Defaults to 10 images.
interpolate_lattices (bool): Whether to interpolate the lattices.
Interpolates the lengths and angles (rather than the matrix)
so orientation may be affected.
pbc (bool): Whether to use periodic boundary conditions to find
the shortest path between endpoints.
autosort_tol (float): A distance tolerance in angstrom in
which to automatically sort end_structure to match to the
closest points in this particular structure. This is usually
what you want in a NEB calculation. 0 implies no sorting.
Otherwise, a 0.5 value usually works pretty well.
Returns:
List of interpolated structures. The starting and ending
structures included as the first and last structures respectively.
A total of (nimages + 1) structures are returned.
"""
# Check length of structures
if len(self) != len(end_structure):
raise ValueError("Structures have different lengths!")
if not (interpolate_lattices or self.lattice == end_structure.lattice):
raise ValueError("Structures with different lattices!")
if not isinstance(nimages, collections.abc.Iterable):
images = np.arange(nimages + 1) / nimages
else:
images = nimages # type: ignore
# Check that both structures have the same species
for i, site in enumerate(self):
if site.species != end_structure[i].species:
raise ValueError(
"Different species!\nStructure 1:\n" + str(self) + "\nStructure 2\n" + str(end_structure)
)
start_coords = np.array(self.frac_coords)
end_coords = np.array(end_structure.frac_coords)
if autosort_tol:
dist_matrix = self.lattice.get_all_distances(start_coords, end_coords)
site_mappings = collections.defaultdict(list) # type: Dict[int, List[int]]
unmapped_start_ind = []
for i, row in enumerate(dist_matrix):
ind = np.where(row < autosort_tol)[0]
if len(ind) == 1:
site_mappings[i].append(ind[0])
else:
unmapped_start_ind.append(i)
if len(unmapped_start_ind) > 1:
raise ValueError(f"Unable to reliably match structures with {autosort_tol = }, {unmapped_start_ind = }")
sorted_end_coords = np.zeros_like(end_coords)
matched = []
for i, j in site_mappings.items():
if len(j) > 1:
raise ValueError(
f"Unable to reliably match structures with auto_sort_tol = {autosort_tol}. "
"More than one site match!"
)
sorted_end_coords[i] = end_coords[j[0]]
matched.append(j[0])
if len(unmapped_start_ind) == 1:
i = unmapped_start_ind[0]
j = list(set(range(len(start_coords))).difference(matched))[0] # type: ignore
sorted_end_coords[i] = end_coords[j]
end_coords = sorted_end_coords
vec = end_coords - start_coords
if pbc:
vec[:, self.pbc] -= np.round(vec[:, self.pbc])
sp = self.species_and_occu
structs = []
if interpolate_lattices:
# interpolate lattice matrices using polar decomposition
from scipy.linalg import polar
# u is a unitary rotation, p is stretch
u, p = polar(np.dot(end_structure.lattice.matrix.T, np.linalg.inv(self.lattice.matrix.T)))
lvec = p - np.identity(3)
lstart = self.lattice.matrix.T
for x in images:
if interpolate_lattices:
l_a = np.dot(np.identity(3) + x * lvec, lstart).T
lat = Lattice(l_a)
else:
lat = self.lattice
fcoords = start_coords + x * vec
structs.append(self.__class__(lat, sp, fcoords, site_properties=self.site_properties))
return structs
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4, verbose=True):
"""
Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
return self.lattice.get_miller_index_from_coords(
self.frac_coords[site_ids],
coords_are_cartesian=False,
round_dp=round_dp,
verbose=verbose,
)
def get_primitive_structure(
self, tolerance: float = 0.25, use_site_props: bool = False, constrain_latt: list | dict | None = None
):
"""
        This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
use_site_props (bool): Whether to account for site properties in
differentiating sites.
constrain_latt (list/dict): List of lattice parameters we want to
preserve, e.g. ["alpha", "c"] or dict with the lattice
parameter names as keys and values we want the parameters to
be e.g. {"alpha": 90, "c": 2.5}.
Returns:
The most primitive structure found.
"""
if constrain_latt is None:
constrain_latt = []
def site_label(site):
if not use_site_props:
return site.species_string
d = [site.species_string]
for k in sorted(site.properties.keys()):
d.append(k + "=" + str(site.properties[k]))
return ", ".join(d)
# group sites by species string
sites = sorted(self._sites, key=site_label)
grouped_sites = [list(a[1]) for a in itertools.groupby(sites, key=site_label)]
grouped_fcoords = [np.array([s.frac_coords for s in g]) for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[
[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in itertools.product(range(a), range(a), range(e))
]
)
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = functools.reduce(math.gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
            # find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m**2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites, grouped_fcoords, grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(
new_l,
new_sp,
new_coords,
site_properties=new_props,
coords_are_cartesian=False,
)
# Default behavior
p = s.get_primitive_structure(
tolerance=tolerance,
use_site_props=use_site_props,
constrain_latt=constrain_latt,
).get_reduced_structure()
if not constrain_latt:
return p
# Only return primitive structures that
# satisfy the restriction condition
p_latt, s_latt = p.lattice, self.lattice
if type(constrain_latt).__name__ == "list":
if all(getattr(p_latt, pp) == getattr(s_latt, pp) for pp in constrain_latt):
return p
elif type(constrain_latt).__name__ == "dict":
if all(
getattr(p_latt, pp) == constrain_latt[pp] for pp in constrain_latt.keys() # type: ignore
):
return p
return self.copy()
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
if self._charge:
if self._charge >= 0:
outs.append(f"Overall Charge: +{self._charge}")
else:
outs.append(f"Overall Charge: -{self._charge}")
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = [
f"Full Formula ({self.composition.formula})",
f"Reduced Formula: {self.composition.reduced_formula}",
]
def to_s(x):
return f"{x:0.6f}"
outs.append("abc : " + " ".join([to_s(i).rjust(10) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10) for i in self.lattice.angles]))
outs.append("pbc : " + " ".join([str(p).rjust(10) for p in self.lattice.pbc]))
if self._charge:
if self._charge >= 0:
outs.append(f"Overall Charge: +{self._charge}")
else:
outs.append(f"Overall Charge: -{self._charge}")
outs.append(f"Sites ({len(self)})")
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, site in enumerate(self):
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
for k in keys:
row.append(props[k][i])
data.append(row)
outs.append(
tabulate(
data,
headers=["#", "SP", "a", "b", "c"] + keys,
)
)
return "\n".join(outs)
def get_orderings(self, mode: Literal["enum", "sqs"] = "enum", **kwargs) -> list[Structure]:
"""
Returns list of orderings for a disordered structure. If structure
does not contain disorder, the default structure is returned.
Args:
mode ("enum" | "sqs"): Either "enum" or "sqs". If enum,
the enumlib will be used to return all distinct
orderings. If sqs, mcsqs will be used to return
an sqs structure.
kwargs: kwargs passed to either
pymatgen.command_line..enumlib_caller.EnumlibAdaptor
or pymatgen.command_line.mcsqs_caller.run_mcsqs.
For run_mcsqs, a default cluster search of 2 cluster interactions
with 1NN distance and 3 cluster interactions with 2NN distance
is set.
Returns:
List[Structure]
"""
if self.is_ordered:
return [self]
if mode.startswith("enum"):
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
adaptor = EnumlibAdaptor(self, **kwargs)
adaptor.run()
return adaptor.structures
if mode == "sqs":
from pymatgen.command_line.mcsqs_caller import run_mcsqs
if "clusters" not in kwargs:
disordered_sites = [site for site in self if not site.is_ordered]
subset_structure = Structure.from_sites(disordered_sites)
dist_matrix = subset_structure.distance_matrix
dists = sorted(set(dist_matrix.ravel()))
unique_dists = []
for i in range(1, len(dists)):
if dists[i] - dists[i - 1] > 0.1:
unique_dists.append(dists[i])
clusters = {(i + 2): d + 0.01 for i, d in enumerate(unique_dists) if i < 2}
kwargs["clusters"] = clusters
return [run_mcsqs(self, **kwargs).bestsqs]
raise ValueError()
def as_dict(self, verbosity=1, fmt=None, **kwargs):
"""
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and Cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON-serializable dict representation.
"""
if fmt == "abivars":
"""Returns a dictionary with the ABINIT variables."""
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {
"@module": type(self).__module__,
"@class": type(self).__name__,
"charge": self.charge,
"lattice": latt_dict,
"sites": [],
}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
def as_dataframe(self):
"""
Returns a Pandas dataframe of the sites. Structure level attributes are stored in DataFrame.attrs. Example:
              Species    a    b             c    x             y             z  magmom
            0    (Si)  0.0  0.0  0.000000e+00  0.0  0.000000e+00  0.000000e+00       5
            1    (Si)  0.0  0.0  1.000000e-07  0.0 -2.217138e-07  3.135509e-07      -5
"""
data = []
site_properties = self.site_properties
prop_keys = list(site_properties.keys())
for site in self:
row = [site.species] + list(site.frac_coords) + list(site.coords)
for k in prop_keys:
row.append(site.properties.get(k))
data.append(row)
import pandas as pd
df = pd.DataFrame(data, columns=["Species", "a", "b", "c", "x", "y", "z"] + prop_keys)
df.attrs["Reduced Formula"] = self.composition.reduced_formula
df.attrs["Lattice"] = self.lattice
return df
@classmethod
def from_dict(cls, d: dict[str, Any], fmt: Literal["abivars"] | None = None):
"""
Reconstitute a Structure object from a dict representation of Structure
created using as_dict().
Args:
d (dict): Dict representation of structure.
fmt ('abivars' | None): Use structure_from_abivars() to parse the dict. Defaults to None.
Returns:
Structure object
"""
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_from_abivars
return structure_from_abivars(cls=cls, **d)
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
charge = d.get("charge", None)
return cls.from_sites(sites, charge=charge)
def to(self, fmt: str = None, filename: str = None, **kwargs) -> str | None:
"""
Outputs the structure to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
                is provided. If fmt is specified, it overrides whatever the
filename is. Options include "cif", "poscar", "cssr", "json".
Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
                filename. Default is None, i.e. string output.
**kwargs: Kwargs passthru to relevant methods. E.g., This allows
the passing of parameters like symprec to the
CifWriter.__init__ method for generation of symmetric cifs.
Returns:
(str) if filename is None. None otherwise.
"""
filename = filename or ""
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename)
if fmt == "cif" or fnmatch(fname.lower(), "*.cif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, **kwargs)
elif fmt == "mcif" or fnmatch(fname.lower(), "*.mcif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, write_magmoms=True, **kwargs)
elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
from pymatgen.io.vasp import Poscar
writer = Poscar(self, **kwargs)
elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
from pymatgen.io.cssr import Cssr
writer = Cssr(self) # type: ignore
elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
s = json.dumps(self.as_dict())
if filename:
with zopen(filename, "wt") as f:
f.write(s)
return s
elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
from pymatgen.io.xcrysden import XSF
s = XSF(self).to_string()
if filename:
with zopen(fname, "wt", encoding="utf8") as f:
f.write(s)
return s
elif (
fmt == "mcsqs" or fnmatch(fname, "*rndstr.in*") or fnmatch(fname, "*lat.in*") or fnmatch(fname, "*bestsqs*")
):
from pymatgen.io.atat import Mcsqs
s = Mcsqs(self).to_string()
if filename:
with zopen(fname, "wt", encoding="ascii") as f:
f.write(s)
return s
elif fmt == "prismatic" or fnmatch(fname, "*prismatic*"):
from pymatgen.io.prismatic import Prismatic
s = Prismatic(self).to_string()
return s
elif fmt == "yaml" or fnmatch(fname, "*.yaml*") or fnmatch(fname, "*.yml*"):
yaml = YAML()
if filename:
with zopen(filename, "wt") as f:
yaml.dump(self.as_dict(), f)
return None
sio = StringIO()
yaml.dump(self.as_dict(), sio)
return sio.getvalue()
elif fmt == "fleur-inpgen" or fnmatch(fname, "*.in*"):
from pymatgen.io.fleur import FleurInput
writer = FleurInput(self, **kwargs)
else:
raise ValueError(f"Invalid format: `{str(fmt)}`")
if filename:
writer.write_file(filename)
return None
return str(writer)
@classmethod
def from_str(
cls,
input_string: str,
fmt: Literal["cif", "poscar", "cssr", "json", "yaml", "xsf", "mcsqs"],
primitive=False,
sort=False,
merge_tol=0.0,
):
"""
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A file format specification. One of "cif", "poscar", "cssr",
"json", "yaml", "xsf", "mcsqs".
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
"""
fmt_low = fmt.lower()
if fmt_low == "cif":
from pymatgen.io.cif import CifParser
parser = CifParser.from_string(input_string)
s = parser.get_structures(primitive=primitive)[0]
elif fmt_low == "poscar":
from pymatgen.io.vasp import Poscar
s = Poscar.from_string(input_string, False, read_velocities=False).structure
elif fmt_low == "cssr":
from pymatgen.io.cssr import Cssr
cssr = Cssr.from_string(input_string)
s = cssr.structure
elif fmt_low == "json":
d = json.loads(input_string)
s = Structure.from_dict(d)
elif fmt_low == "yaml":
yaml = YAML()
d = yaml.load(input_string)
s = Structure.from_dict(d)
elif fmt_low == "xsf":
from pymatgen.io.xcrysden import XSF
s = XSF.from_string(input_string).structure
elif fmt_low == "mcsqs":
from pymatgen.io.atat import Mcsqs
s = Mcsqs.structure_from_string(input_string)
elif fmt == "fleur-inpgen":
from pymatgen.io.fleur import FleurInput
s = FleurInput.from_string(input_string, inpgen_input=True).structure
elif fmt == "fleur":
from pymatgen.io.fleur import FleurInput
s = FleurInput.from_string(input_string, inpgen_input=False).structure
else:
raise ValueError(f"Unrecognized format `{fmt}`!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a file. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR, Netcdf and pymatgen's JSON-serialized structures.
Args:
filename (str): The filename to read from.
primitive (bool): Whether to convert to a primitive cell
Only available for cifs. Defaults to False.
sort (bool): Whether to sort sites. Default to False.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
Structure.
"""
filename = str(filename)
if filename.endswith(".nc"):
# Read Structure from a netcdf file.
from pymatgen.io.abinit.netcdf import structure_from_ncdata
s = structure_from_ncdata(filename, cls=cls)
if sort:
s = s.get_sorted_structure()
return s
from pymatgen.io.exciting import ExcitingInput
from pymatgen.io.lmto import LMTOCtrl
from pymatgen.io.vasp import Chgcar, Vasprun
fname = os.path.basename(filename)
with zopen(filename, "rt") as f:
contents = f.read()
if fnmatch(fname.lower(), "*.cif*") or fnmatch(fname.lower(), "*.mcif*"):
return cls.from_str(contents, fmt="cif", primitive=primitive, sort=sort, merge_tol=merge_tol)
if fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or fnmatch(fname, "*.vasp"):
s = cls.from_str(
contents,
fmt="poscar",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
return cls.from_str(
contents,
fmt="cssr",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(
contents,
fmt="json",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.yaml*") or fnmatch(fname, "*.yml*"):
return cls.from_str(
contents,
fmt="yaml",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "*.xsf"):
return cls.from_str(contents, fmt="xsf", primitive=primitive, sort=sort, merge_tol=merge_tol)
elif fnmatch(fname, "input*.xml"):
return ExcitingInput.from_file(fname).structure
elif fnmatch(fname, "*rndstr.in*") or fnmatch(fname, "*lat.in*") or fnmatch(fname, "*bestsqs*"):
return cls.from_str(
contents,
fmt="mcsqs",
primitive=primitive,
sort=sort,
merge_tol=merge_tol,
)
elif fnmatch(fname, "CTRL*"):
return LMTOCtrl.from_file(filename=filename).structure
elif fnmatch(fname, "inp*.xml") or fnmatch(fname, "*.in*") or fnmatch(fname, "inp_*"):
from pymatgen.io.fleur import FleurInput
s = FleurInput.from_file(filename).structure
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
s.__class__ = cls
return s
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
    sequence of sites. IMolecule is made to be immutable so that it can
    function as a key in a dict. For a mutable molecule,
    use the :class:`Molecule`.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(
self,
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = 0.0,
spin_multiplicity: int = None,
validate_proximity: bool = False,
site_properties: dict = None,
charge_spin_check: bool = True,
):
"""
Creates a Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Species, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of Cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
charge_spin_check (bool): Whether to check that the charge and
spin multiplicity are compatible with each other. Defaults
to True.
"""
if len(species) != len(coords):
            raise StructureError(
                "The list of atomic species must be of the same length as the list of coordinates."
            )
self._charge_spin_check = charge_spin_check
sites = []
for i, _ in enumerate(species):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(Site(species[i], coords[i], properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError("Molecule contains sites that are less than 0.01 Angstrom apart!")
self._charge = charge
nelectrons = 0.0
for site in sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecies):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if charge_spin_check and (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
f"Charge of {self._charge} and spin multiplicity of {spin_multiplicity} is not possible for "
"this molecule!"
)
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
@property
def charge(self) -> float:
"""
Charge of molecule
"""
return self._charge
@property
def spin_multiplicity(self) -> float:
"""
Spin multiplicity of molecule.
"""
return self._spin_multiplicity
@property
def nelectrons(self) -> float:
"""
Number of electrons in the molecule.
"""
return self._nelectrons
@property
def center_of_mass(self) -> np.ndarray:
"""
Center of mass of molecule.
"""
center = np.zeros(3)
total_weight: float = 0
for site in self:
wt = site.species.weight
center += site.coords * wt
total_weight += wt
return center / total_weight
@property
def sites(self) -> tuple[Site, ...]:
"""
Returns a tuple of sites in the Molecule.
"""
return self._sites
@classmethod
def from_sites(
cls,
sites: Sequence[Site],
charge: float = 0,
spin_multiplicity: int = None,
validate_proximity: bool = False,
charge_spin_check: bool = True,
) -> IMolecule | Molecule:
"""
Convenience constructor to make a Molecule from a list of sites.
Args:
sites ([Site]): Sequence of Sites.
charge (int): Charge of molecule. Defaults to 0.
            spin_multiplicity (int): Spin multiplicity. Defaults to None,
                in which case it is determined automatically.
            validate_proximity (bool): Whether to check that no atoms are
                unreasonably close to one another.
charge_spin_check (bool): Whether to check that the charge and
spin multiplicity are compatible with each other. Defaults
to True.
"""
props = collections.defaultdict(list)
for site in sites:
for k, v in site.properties.items():
props[k].append(v)
return cls(
[site.species for site in sites],
[site.coords for site in sites],
charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=props,
charge_spin_check=charge_spin_check,
)
def break_bond(self, ind1: int, ind2: int, tol: float = 0.2) -> tuple[IMolecule | Molecule, ...]:
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
clusters = [[self._sites[ind1]], [self._sites[ind2]]]
sites = [site for i, site in enumerate(self._sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return tuple(type(self).from_sites(cluster) for cluster in clusters)
def get_covalent_bonds(self, tol: float = 0.2) -> list[CovalentBond]:
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __repr__(self):
outs = ["Molecule Summary"]
for s in self:
outs.append(s.__repr__())
return "\n".join(outs)
def __str__(self):
outs = [
f"Full Formula ({self.composition.formula})",
"Reduced Formula: " + self.composition.reduced_formula,
f"Charge = {self._charge}, Spin Mult = {self._spin_multiplicity}",
f"Sites ({len(self)})",
]
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i),
site.species_string,
" ".join([f"{j:0.6f}".rjust(12) for j in site.coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
JSON-serializable dict representation of Molecule
"""
d = {
"@module": type(self).__module__,
"@class": type(self).__name__,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"sites": [],
}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d) -> dict:
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)
def get_distance(self, i: int, j: int) -> float:
"""
Get distance between site i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
Distance between the two sites.
"""
return self[i].distance(self[j])
def get_sites_in_sphere(self, pt: ArrayLike, r: float) -> list[Neighbor]:
"""
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere
r (float): Radius of sphere.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
neighbors = []
for i, site in enumerate(self._sites):
dist = site.distance_from_point(pt)
if dist <= r:
neighbors.append(Neighbor(site.species, site.coords, site.properties, dist, i))
return neighbors
def get_neighbors(self, site: Site, r: float) -> list[Neighbor]:
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
nns = self.get_sites_in_sphere(site.coords, r)
return [nn for nn in nns if nn != site]
def get_neighbors_in_shell(self, origin: ArrayLike, r: float, dr: float) -> list[Neighbor]:
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[:class:`pymatgen.core.structure.Neighbor`]
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [nn for nn in outer if nn.nn_distance > inner]
def get_boxed_structure(
self,
a: float,
b: float,
c: float,
images: ArrayLike = (1, 1, 1),
random_rotation: bool = False,
min_dist: float = 1.0,
cls=None,
offset: ArrayLike = None,
no_cross: bool = False,
reorder: bool = True,
) -> IStructure | Structure:
"""
Creates a Structure from a Molecule by putting the Molecule in the
        center of an orthorhombic box. Useful for creating a Structure for
        calculations on molecules using periodic codes.
Args:
a (float): a-lattice parameter.
b (float): b-lattice parameter.
c (float): c-lattice parameter.
images: No. of boxed images in each direction. Defaults to
(1, 1, 1), meaning single molecule with 1 lattice parameter
in each direction.
random_rotation (bool): Whether to apply a random rotation to
each molecule. This jumbles all the molecules so that they
are not exact images of each other.
min_dist (float): The minimum distance that atoms should be from
each other. This is only used if random_rotation is True.
The randomized rotations are searched such that no two atoms
are less than min_dist from each other.
cls: The Structure class to instantiate (defaults to pymatgen
structure)
offset: Translation to offset molecule from center of mass coords
no_cross: Whether to forbid molecule coords from extending beyond
boundary of box.
reorder: Whether to reorder the sites to be in electronegativity
order.
Returns:
Structure containing molecule in a box.
"""
if offset is None:
offset = np.array([0, 0, 0])
coords = np.array(self.cart_coords)
x_range = max(coords[:, 0]) - min(coords[:, 0])
y_range = max(coords[:, 1]) - min(coords[:, 1])
z_range = max(coords[:, 2]) - min(coords[:, 2])
if a <= x_range or b <= y_range or c <= z_range:
raise ValueError("Box is not big enough to contain Molecule.")
lattice = Lattice.from_parameters(a * images[0], b * images[1], c * images[2], 90, 90, 90) # type: ignore
nimages = images[0] * images[1] * images[2] # type: ignore
all_coords: list[ArrayLike] = []
centered_coords = self.cart_coords - self.center_of_mass + offset
for i, j, k in itertools.product(
list(range(images[0])), list(range(images[1])), list(range(images[2])) # type: ignore
):
box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
if random_rotation:
while True:
op = SymmOp.from_origin_axis_angle(
(0, 0, 0),
axis=np.random.rand(3),
angle=random.uniform(-180, 180),
)
m = op.rotation_matrix
new_coords = np.dot(m, centered_coords.T).T + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
if len(all_coords) == 0:
break
distances = lattice.get_all_distances(
lattice.get_fractional_coords(new_coords),
lattice.get_fractional_coords(all_coords),
)
if np.amin(distances) > min_dist:
break
else:
new_coords = centered_coords + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
all_coords.extend(new_coords)
sprops = {k: v * nimages for k, v in self.site_properties.items()} # type: ignore
if cls is None:
cls = Structure
if reorder:
return cls(
lattice,
self.species * nimages, # type: ignore
all_coords,
coords_are_cartesian=True,
site_properties=sprops,
).get_sorted_structure()
return cls(
lattice,
self.species * nimages, # type: ignore
            all_coords,
coords_are_cartesian=True,
site_properties=sprops,
)
def get_centered_molecule(self) -> IMolecule | Molecule:
"""
Returns a Molecule centered at the center of mass.
Returns:
Molecule centered with center of mass at origin.
"""
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return self.__class__(
self.species_and_occu,
new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties,
charge_spin_check=self._charge_spin_check,
)
def to(self, fmt=None, filename=None):
"""
Outputs the molecule to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
                is provided. If fmt is specified, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats supported by
OpenBabel. Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
                filename. Default is None, i.e. string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.babel import BabelMolAdaptor
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.xyz import XYZ
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename or "")
if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
writer = XYZ(self)
elif any(fmt == r or fnmatch(fname.lower(), f"*.{r}*") for r in ["gjf", "g03", "g09", "com", "inp"]):
writer = GaussianInput(self)
elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
if filename:
with zopen(filename, "wt", encoding="utf8") as f:
return json.dump(self.as_dict(), f)
else:
return json.dumps(self.as_dict())
elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
yaml = YAML()
if filename:
with zopen(fname, "wt", encoding="utf8") as f:
return yaml.dump(self.as_dict(), f)
else:
sio = StringIO()
yaml.dump(self.as_dict(), sio)
return sio.getvalue()
else:
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)", fname.lower())
if (not fmt) and m:
fmt = m.group(1)
writer = BabelMolAdaptor(self)
return writer.write_file(filename, file_format=fmt)
if filename:
writer.write_file(filename)
return str(writer)
@classmethod
def from_str(cls, input_string: str, fmt: str):
"""
Reads the molecule from a string.
Args:
input_string (str): String to parse.
            fmt (str): Format of the input string. Options include "xyz",
                "gjf", "g03", "g09", "com", "inp", "json", "yaml". If you have
                OpenBabel installed, any of the formats supported by OpenBabel
                may also be used.
Returns:
IMolecule or Molecule.
"""
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.xyz import XYZ
if fmt.lower() == "xyz":
m = XYZ.from_string(input_string).molecule
elif fmt in ["gjf", "g03", "g09", "com", "inp"]:
m = GaussianInput.from_string(input_string).molecule
elif fmt == "json":
d = json.loads(input_string)
return cls.from_dict(d)
elif fmt == "yaml":
yaml = YAML()
d = yaml.load(input_string)
return cls.from_dict(d)
else:
from pymatgen.io.babel import BabelMolAdaptor
m = BabelMolAdaptor.from_string(input_string, file_format=fmt).pymatgen_mol
return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
"""
        Reads a molecule from a file. Supported formats include xyz,
        Gaussian input (gjf|g03|g09|com|inp), Gaussian output (.out|.lis|.log),
        pymatgen's JSON-serialized molecules, and yaml. Using openbabel,
        many more extensions are supported, but this requires openbabel to be
        installed.
Args:
filename (str): The filename to read from.
Returns:
Molecule
"""
filename = str(filename)
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
if any(fnmatch(fname.lower(), f"*.{r}*") for r in ["gjf", "g03", "g09", "com", "inp"]):
return cls.from_str(contents, fmt="g09")
if any(fnmatch(fname.lower(), f"*.{r}*") for r in ["out", "lis", "log"]):
return GaussianOutput(filename).final_structure
if fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
if fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
from pymatgen.io.babel import BabelMolAdaptor
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)", filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename, m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Cannot determine file type.")
class Structure(IStructure, collections.abc.MutableSequence):
"""
Mutable version of structure.
"""
__hash__ = None # type: ignore
def __init__(
self,
lattice: ArrayLike | Lattice,
species: Sequence[CompositionLike],
coords: Sequence[ArrayLike],
charge: float = None,
validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None,
):
"""
Create a periodic structure.
Args:
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in Cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
super().__init__(
lattice,
species,
coords,
charge=charge,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
self._sites: list[PeriodicSite] = list(self._sites) # type: ignore
def __setitem__( # type: ignore
self, i: int | slice | Sequence[int] | SpeciesLike, site: SpeciesLike | PeriodicSite | Sequence
):
"""
Modify a site in the structure.
Args:
i (int, [int], slice, Species-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Species/Sequence): Three options exist. You
can provide a PeriodicSite directly (lattice will be
checked). Or more conveniently, you can provide a
specie-like object or a tuple of up to length 3.
Examples:
s[0] = "Fe"
s[0] = Element("Fe")
both replaces the species only.
s[0] = "Fe", [0.5, 0.5, 0.5]
Replaces site and *fractional* coordinates. Any properties
are inherited from current site.
s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
Replaces site and *fractional* coordinates and properties.
s[(0, 2, 3)] = "Fe"
Replaces sites 0, 2 and 3 with Fe.
s[0::2] = "Fe"
Replaces all even index sites with Fe.
s["Mn"] = "Fe"
Replaces all Mn in the structure with Fe. This is
a short form for the more complex replace_species.
s["Mn"] = "Fe0.5Co0.5"
Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
creates a disordered structure!
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Species)):
self.replace_species({i: site}) # type: ignore
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites) if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, PeriodicSite):
if site.lattice != self._lattice:
raise ValueError("PeriodicSite added must have same lattice as Structure!")
if len(indices) != 1:
raise ValueError("Site assignments makes sense only for single int indices!")
self._sites[ii] = site
else:
if isinstance(site, str) or (not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site # type: ignore
else:
self._sites[ii].species = site[0] # type: ignore
if len(site) > 1:
self._sites[ii].frac_coords = site[1] # type: ignore
if len(site) > 2:
self._sites[ii].properties = site[2] # type: ignore
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
@property
def lattice(self) -> Lattice:
"""
:return: Lattice associated with structure.
"""
return self._lattice
@lattice.setter
def lattice(self, lattice: ArrayLike | Lattice):
if not isinstance(lattice, Lattice):
lattice = Lattice(lattice)
self._lattice = lattice
for site in self._sites:
site.lattice = lattice
def append( # type: ignore
self,
species: CompositionLike,
coords: ArrayLike,
coords_are_cartesian: bool = False,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties of the site.
Returns:
New structure with inserted site.
"""
return self.insert(
len(self),
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties,
)
def insert( # type: ignore
self,
i: int,
species: CompositionLike,
coords: ArrayLike,
coords_are_cartesian: bool = False,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties associated with the site.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice, properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing site!")
self._sites.insert(i, new_site)
def replace(
self,
i: int,
species: CompositionLike,
coords: ArrayLike = None,
coords_are_cartesian: bool = False,
properties: dict = None,
):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
properties (dict): Properties associated with the site.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords
new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)
self._sites[i] = new_site
def substitute(self, index: int, func_group: IMolecule | Molecule | str, bond_order: int = 1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_group: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist, _, _ in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2, _, _ in self.get_neighbors(nn, 3):
if inn != self[index] and dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
        # Pass value of functional group -- either user-defined or from
        # func_groups.json
if not isinstance(func_group, Molecule):
# Check to see whether the functional group is in database.
if func_group not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. Provide explicit coordinate instead")
fgroup = FunctionalGroups[func_group]
else:
fgroup = func_group
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
try:
bl = get_bond_length(non_terminal_nn.specie, fgroup[1].specie, bond_order=bond_order)
# Catches for case of incompatibility between Element(s) and Species(s)
except TypeError:
bl = None
if bl is not None:
fgroup = fgroup.copy()
vec = fgroup[0].coords - fgroup[1].coords
vec /= np.linalg.norm(vec)
fgroup[0] = "X", fgroup[1].coords + float(bl) * vec
# Align X to the origin.
x = fgroup[0]
fgroup.translate_sites(list(range(len(fgroup))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = fgroup[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
fgroup.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i, fg in enumerate(fgroup):
fgroup[i] = (fg.species, origin - (fg.coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in fgroup[1:]:
s_new = PeriodicSite(site.species, site.coords, self.lattice, coords_are_cartesian=True)
self._sites.append(s_new)
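    # Sketch of a substitution call (hypothetical index and group name; the
    # valid names are whatever keys exist in func_groups.json):
    #
    #     structure.substitute(0, "methyl", bond_order=1)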
def remove_species(self, species: Sequence[SpeciesLike]):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(
PeriodicSite(
new_sp_occu,
site.frac_coords,
self._lattice,
properties=site.properties,
)
)
self._sites = new_sites
def remove_sites(self, indices: Sequence[int]) -> None:
"""
        Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [s for i, s in enumerate(self._sites) if i not in indices]
def apply_operation(self, symmop: SymmOp, fractional: bool = False) -> Structure:
"""
Apply a symmetry operation to the structure in place and return the modified
structure. The lattice is operated on by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in Cartesian coordinates.
Returns:
Structure: post-operation structure
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row) for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(
site.species,
new_frac,
self._lattice,
properties=site.properties,
skip_checks=True,
)
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(
site.species,
symmop.operate(site.frac_coords),
self._lattice,
properties=site.properties,
skip_checks=True,
)
self._sites = [operate_site(s) for s in self._sites]
return self
def apply_strain(self, strain: ArrayLike) -> None:
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
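    # Example (sketch): a uniform 1% expansion vs. a strain along a only.
    #
    #     structure.apply_strain(0.01)
    #     structure.apply_strain([0.01, 0.0, 0.0])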
def sort(self, key: Callable = None, reverse: bool = False) -> None:
"""
Sort a structure in place. The parameters have the same meaning as in
list.sort. By default, sites are sorted by the electronegativity of
the species. The difference between this method and
get_sorted_structure (which also works in IStructure) is that the
latter returns a new Structure, while this just sorts the Structure
in place.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
self._sites.sort(key=key, reverse=reverse)
def translate_sites(
self, indices: int | Sequence[int], vector: ArrayLike, frac_coords: bool = True, to_unit_cell: bool = True
) -> None:
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
Cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.abc.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(site.coords + vector)
if to_unit_cell:
fcoords = [np.mod(f, 1) if p else f for p, f in zip(self.lattice.pbc, fcoords)]
self._sites[i].frac_coords = fcoords
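    # Example (sketch): shift the first two sites by 0.1 along a in fractional
    # coordinates, wrapping them back into the unit cell.
    #
    #     structure.translate_sites([0, 1], [0.1, 0, 0], frac_coords=True)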
def rotate_sites(
self,
indices: list[int] = None,
theta: float = 0.0,
axis: ArrayLike = None,
anchor: ArrayLike = None,
to_unit_cell: bool = True,
) -> None:
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
from numpy import cross, eye
from numpy.linalg import norm
from scipy.linalg import expm
if indices is None:
indices = list(range(len(self)))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()
new_site = PeriodicSite(
site.species,
coords,
self._lattice,
to_unit_cell=to_unit_cell,
coords_are_cartesian=True,
properties=site.properties,
skip_checks=True,
)
self._sites[i] = new_site
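    # Example (sketch): rotate site 0 by 90 degrees about the c axis, anchored
    # at the origin.
    #
    #     structure.rotate_sites([0], theta=np.pi / 2, axis=[0, 0, 1],
    #                            anchor=[0, 0, 0])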
def perturb(self, distance: float, min_distance: float = None) -> None:
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
min_distance (None, int, or float): if None, all displacements will
be equal amplitude. If int or float, perturb each site a
distance drawn from the uniform distribution between
'min_distance' and 'distance'.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
dist = distance
if isinstance(min_distance, (float, int)):
dist = np.random.uniform(min_distance, dist)
return vector / vnorm * dist if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
def make_supercell(self, scaling_matrix: ArrayLike, to_unit_cell: bool = True) -> None:
"""
Create a supercell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
                b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
            to_unit_cell: Whether or not to fold sites back into the unit cell
"""
s = self * scaling_matrix
if to_unit_cell:
for site in s:
site.to_unit_cell(in_place=True)
self._sites = s.sites
self._lattice = s.lattice
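    # Example (sketch): the three accepted forms of scaling_matrix.
    #
    #     structure.make_supercell([[2, 1, 0], [0, 3, 0], [0, 0, 1]])
    #     structure.make_supercell([2, 1, 1])
    #     structure.make_supercell(3)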
def scale_lattice(self, volume: float) -> None:
"""
Performs a scaling of the lattice vectors so that length proportions
and angles are preserved.
Args:
volume (float): New volume of the unit cell in A^3.
"""
self.lattice = self._lattice.scale(volume)
def merge_sites(self, tol: float = 0.01, mode: Literal["sum", "delete", "average"] = "sum") -> None:
"""
Merges sites (adding occupancies) within tol of each other.
Removes site properties.
Args:
tol (float): Tolerance for distance to merge sites.
mode ('sum' | 'delete' | 'average'): "delete" means duplicate sites are
deleted. "sum" means the occupancies are summed for the sites.
"average" means that the site is deleted but the properties are averaged
Only first letter is considered.
"""
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
d = self.distance_matrix
np.fill_diagonal(d, 0)
clusters = fcluster(linkage(squareform((d + d.T) / 2)), tol, "distance")
sites = []
for c in np.unique(clusters):
inds = np.where(clusters == c)[0]
species = self[inds[0]].species
coords = self[inds[0]].frac_coords
props = self[inds[0]].properties
for n, i in enumerate(inds[1:]):
sp = self[i].species
if mode.lower()[0] == "s":
species += sp
offset = self[i].frac_coords - coords
coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(coords.dtype)
for key in props.keys():
if props[key] is not None and self[i].properties[key] != props[key]:
if mode.lower()[0] == "a" and isinstance(props[key], float):
# update a running total
props[key] = props[key] * (n + 1) / (n + 2) + self[i].properties[key] / (n + 2)
else:
props[key] = None
warnings.warn(
f"Sites with different site property {key} are merged. So property is set to none"
)
sites.append(PeriodicSite(species, coords, self.lattice, properties=props))
self._sites = sites
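    # Example (sketch): collapse near-duplicate sites within 0.05 A of each
    # other, keeping a single site per cluster instead of summing occupancies.
    #
    #     structure.merge_sites(tol=0.05, mode="delete")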
def set_charge(self, new_charge: float = 0.0) -> None:
"""
Sets the overall structure charge
Args:
new_charge (float): new charge to set
"""
self._charge = new_charge
class Molecule(IMolecule, collections.abc.MutableSequence):
"""
Mutable Molecule. It has all the methods in IMolecule, but in addition,
it allows a user to perform edits on the molecule.
"""
__hash__ = None # type: ignore
def __init__(
self,
species: Sequence[SpeciesLike],
coords: Sequence[ArrayLike],
charge: float = 0.0,
spin_multiplicity: int = None,
validate_proximity: bool = False,
site_properties: dict = None,
charge_spin_check: bool = True,
) -> None:
"""
Creates a MutableMolecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Species, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of Cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
charge_spin_check (bool): Whether to check that the charge and
spin multiplicity are compatible with each other. Defaults
to True.
"""
super().__init__(
species,
coords,
charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=site_properties,
charge_spin_check=charge_spin_check,
)
self._sites: list[Site] = list(self._sites) # type: ignore
def __setitem__( # type: ignore
self, i: int | slice | Sequence[int] | SpeciesLike, site: SpeciesLike | Site | Sequence
) -> None:
"""
Modify a site in the molecule.
Args:
i (int, [int], slice, Species-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Species/Sequence): Three options exist. You can
provide a Site directly, or for convenience, you can provide
simply a Species-like string/object, or finally a (Species,
coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, (str, Element, Species)):
self.replace_species({i: site}) # type: ignore
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites) if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, Site):
self._sites[ii] = site
else:
if isinstance(site, str) or (not isinstance(site, collections.abc.Sequence)):
self._sites[ii].species = site # type: ignore
else:
self._sites[ii].species = site[0] # type: ignore
if len(site) > 1:
self._sites[ii].coords = site[1] # type: ignore
if len(site) > 2:
self._sites[ii].properties = site[2] # type: ignore
def __delitem__(self, idx) -> None:
"""
        Deletes a site from the Molecule.
"""
self._sites.__delitem__(idx)
def append( # type: ignore
self,
species: CompositionLike,
coords: ArrayLike,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Appends a site to the molecule.
Args:
species: Species of inserted site
coords: Coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): A dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
return self.insert(
len(self),
species,
coords,
validate_proximity=validate_proximity,
properties=properties,
)
def set_charge_and_spin(self, charge: float, spin_multiplicity: int | None = None):
"""
Set the charge and spin multiplicity.
Args:
charge (int): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
"""
self._charge = charge
nelectrons = 0.0
for site in self._sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecies):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if self._charge_spin_check and (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
f"Charge of {self._charge} and spin multiplicity of {spin_multiplicity} is"
" not possible for this molecule"
)
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
def insert( # type: ignore
self,
i: int,
species: CompositionLike,
coords: ArrayLike,
validate_proximity: bool = False,
properties: dict = None,
):
"""
Insert a site to the molecule.
Args:
i (int): Index to insert site
species: species of inserted site
coords (3x1 array): coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to False.
properties (dict): Dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
new_site = Site(species, coords, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing site!")
self._sites.insert(i, new_site)
def remove_species(self, species: Sequence[SpeciesLike]):
"""
        Remove all occurrences of several species from a molecule.
        Args:
            species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords, properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices: Sequence[int]):
"""
        Delete sites at the specified indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites)) if i not in indices]
def translate_sites(self, indices: Sequence[int] = None, vector: ArrayLike = None):
"""
        Translate specific sites by some vector.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
vector = [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species, site.coords + vector, properties=site.properties)
self._sites[i] = new_site
def rotate_sites(
self, indices: Sequence[int] = None, theta: float = 0.0, axis: ArrayLike = None, anchor: ArrayLike = None
):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
"""
from numpy import cross, eye
from numpy.linalg import norm
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
s = ((np.dot(rm, (site.coords - anchor).T)).T + anchor).ravel()
new_site = Site(site.species, s, properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance: float):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop: SymmOp):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species, new_cart, properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return type(self).from_sites(self)
def substitute(self, index: int, func_group: IMolecule | Molecule | str, bond_order: int = 1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
            func_group: Substituent molecule. There are two options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for nn2 in self.get_neighbors(nn, 3):
if nn2 != self[index] and nn2.nn_distance < 1.2 * get_bond_length(nn.specie, nn2.specie):
all_non_terminal_nn.append(nn)
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda nn: nn.nn_distance)
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
        # Pass value of functional group -- either user-defined or from
        # func_groups.json
if isinstance(func_group, Molecule):
func_grp = func_group
else:
# Check to see whether the functional group is in database.
if func_group not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. Provide explicit coordinate instead")
func_grp = FunctionalGroups[func_group]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie, bond_order=bond_order)
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
vec /= np.linalg.norm(vec)
func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i, fg in enumerate(func_grp):
func_grp[i] = (fg.species, origin - (fg.coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
self._sites.append(site)
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
with open(os.path.join(os.path.dirname(__file__), "func_groups.json")) as f:
FunctionalGroups = {k: Molecule(v["species"], v["coords"]) for k, v in json.load(f).items()}
| {
"content_hash": "223c5d6a43983216c1fc82d908aa348c",
"timestamp": "",
"source": "github",
"line_count": 4396,
"max_line_length": 120,
"avg_line_length": 40.10122838944495,
"alnum_prop": 0.5568142496525512,
"repo_name": "gVallverdu/pymatgen",
"id": "add0ed4b268a0d79c8a1526eea87d5e53a44f772",
"size": "176379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/core/structure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "39738"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9200904"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
"""Module for working with regions of interest.
This module contains the :class:`jicimagelib.region.Region` class,
which can be used to represent regions of interest.
One can use the
:func:`jicimagelib.region.Region.select_from_array` class method
to select a region from a numeric array.
>>> import numpy as np
>>> im = np.array([
... [0, 0, 0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0, 0, 0],
... [0, 1, 1, 1, 2, 2, 2],
... [0, 1, 1, 1, 2, 2, 2],
... [0, 0, 2, 2, 2, 2, 2],
... [0, 0, 2, 2, 2, 2, 2],
... [0, 0, 2, 2, 2, 2, 2]], dtype=np.uint8)
...
>>> Region.select_from_array(im, 2)
array([[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, True, True, True],
[False, False, False, False, True, True, True],
[False, False, True, True, True, True, True],
[False, False, True, True, True, True, True],
[False, False, True, True, True, True, True]], dtype=bool)
Alternatively a region can be created from a bitmap.
>>> import numpy as np
>>> bitmap = np.array([
... [False, False, False, False, False, False, False],
... [False, True, False, False, False, True, False],
... [False, True, True, False, True, True, False],
... [False, True, True, True, True, True, False],
... [False, True, True, True, True, True, False],
... [False, False, True, True, True, False, False],
... [False, False, False, True, False, False, False]], dtype=np.bool)
...
>>> roi = Region(bitmap)
A region has several handy properties.
>>> roi.area
20
>>> roi.perimeter
14
The latter is calculated from the border.
>>> roi.border
array([[False, False, False, False, False, False, False],
[False, True, False, False, False, True, False],
[False, True, True, False, True, True, False],
[False, True, False, True, False, True, False],
[False, True, False, False, False, True, False],
[False, False, True, False, True, False, False],
[False, False, False, True, False, False, False]], dtype=bool)
Another handy property is the convex hull.
>>> roi.convex_hull
array([[False, False, False, False, False, False, False],
[False, True, True, True, True, True, False],
[False, True, True, True, True, True, False],
[False, True, True, True, True, True, False],
[False, True, True, True, True, True, False],
[False, False, True, True, True, False, False],
[False, False, False, True, False, False, False]], dtype=bool)
"""
import scipy.ndimage as nd
import numpy as np
import skimage
import skimage.morphology
class Region(object):
"""Class representing a region of interest in an image.
Represented as a bitmask with True indicating the region of interest.
"""
def __init__(self, bitmap):
bitmap_values = set(np.unique(bitmap))
if len(bitmap_values - set([0, 1])):
            raise ValueError('Region bitmap must have only 0 and 1 values')
self.bitmap = bitmap.astype(bool)
@classmethod
def select_from_array(cls, array, identifier):
"""Return a region from a numpy array.
:param array: :class:`numpy.ndarray`
:param identifier: value representing the region to select in the array
:returns: :class:`jicimagelib.region.Region`
"""
base_array = np.zeros(array.shape)
array_coords = np.where(array == identifier)
base_array[array_coords] = 1
return cls(base_array)
@property
def inner(self):
"""Region formed by taking non-border elements.
:returns: :class:`jicimagelib.region.Region`
"""
inner_array = nd.morphology.binary_erosion(self.bitmap)
return Region(inner_array)
@property
def border(self):
"""Region formed by taking border elements.
:returns: :class:`jicimagelib.region.Region`
"""
border_array = self.bitmap - self.inner.bitmap
return Region(border_array)
@property
def convex_hull(self):
"""Region representing the convex hull.
:returns: :class:`jicimagelib.region.Region`
"""
hull_array = skimage.morphology.convex_hull_image(self.bitmap)
return Region(hull_array)
@property
def area(self):
"""Number of non-zero elements.
:returns: int
"""
return np.count_nonzero(self.bitmap)
@property
def index_arrays(self):
"""All nonzero elements as a pair of arrays."""
return np.where(self.bitmap == True)
@property
def points(self):
"""Region as a list of points."""
return zip(*self.index_arrays)
@property
def perimeter(self):
"""Return the perimiter.
:returns: int
"""
return self.border.area
def dilate(self, iterations=1):
"""Return a dilated region.
:param iterations: number of iterations to use in dilation
:returns: :class:`jicimagelib.region.Region`
"""
dilated_array = nd.morphology.binary_dilation(self.bitmap,
iterations=iterations)
return Region(dilated_array)
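    # Illustrative (assuming the ``roi`` built in the module docstring):
    #
    #     bigger = roi.dilate(iterations=2)
    #     assert bigger.area >= roi.area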
def __repr__(self):
return self.bitmap.__repr__()
def __str__(self):
return self.bitmap.__str__()
| {
"content_hash": "2457e2bfae1ca33cca44bf33c5da7ed1",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 30.829545454545453,
"alnum_prop": 0.5888315517876889,
"repo_name": "JIC-CSB/jicimagelib",
"id": "a2e7b16db5bc1043bd71b8dc3fa70b3145f0b1fa",
"size": "5426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jicimagelib/region.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6871"
},
{
"name": "Python",
"bytes": "113623"
}
],
"symlink_target": ""
} |
class Solution(object):
def dfs(self, node, left):
if node is None:
return 0
if node.left is None and node.right is None and left:
return node.val
ret = 0
if node.left is not None:
ret += self.dfs(node.left, True)
if node.right is not None:
ret += self.dfs(node.right, False)
return ret
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        return self.dfs(root, False)
| {
"content_hash": "af5ec838a8977cfe110ada3e660c65ec",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 55,
"avg_line_length": 21.4,
"alnum_prop": 0.6542056074766355,
"repo_name": "xingjian-f/Leetcode-solution",
"id": "ed258c664959b0401fbaa56646cd4446e21a4a92",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "404. Sum of Left Leaves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "562414"
}
],
"symlink_target": ""
} |
"""
Routes for pyramid_pages
"""
from sqlalchemy import or_
from pyramid.events import BeforeRender
from pyramid.location import lineage
from pyramid.httpexceptions import HTTPNotFound
from . import CONFIG_MODELS, CONFIG_DBSESSION
from .security import HOME_PAGE, PREFIX_PAGE
from .resources import (
BasePageResource,
models_of_config,
resource_of_node,
resources_of_config
)
def add_globals(event):
event['lineage'] = lineage
def page_factory(request):
""" Page factory.
Config models example:
.. code-block:: python
models = {
'': [WebPage, CatalogResource],
'catalogue': CatalogResource,
'news': NewsResource,
}
"""
prefix = request.matchdict['prefix'] # /{prefix}/page1/page2/page3...
settings = request.registry.settings
dbsession = settings[CONFIG_DBSESSION]
config = settings[CONFIG_MODELS]
if prefix not in config:
# prepend {prefix} to *traverse
request.matchdict['traverse'] =\
tuple([prefix] + list(request.matchdict['traverse']))
prefix = None
# Get all resources and models from config with the same prefix.
resources = config.get(
prefix, config.get( # 1. get resources with prefix same as URL prefix
'', config.get( # 2. if not, then try to get empty prefix
'/', None))) # 3. else try to get prefix '/' otherwise None
if not hasattr(resources, '__iter__'):
resources = (resources, )
tree = {}
if not resources:
return tree
# Add top level nodes of resources in the tree
for resource in resources:
table = None
if not hasattr(resource, '__table__')\
and hasattr(resource, 'model'):
table = resource.model
else:
table = resource
if not hasattr(table, 'slug'):
continue
nodes = dbsession.query(table)
if hasattr(table, 'parent_id'):
nodes = nodes.filter(or_(
table.parent_id == None, # noqa
table.parent.has(table.slug == '/')
))
for node in nodes:
if not node.slug:
continue
resource = resource_of_node(resources, node)
tree[node.slug] = resource(node, prefix=prefix)
return tree
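# With the example config in the docstring above, a request to /news/... is
# resolved against NewsResource, /catalogue/... against CatalogResource, and
# any other prefix falls back to the '' entry (a sketch of the intended
# routing, assuming matching slugs exist in the database).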
def home_page_factory(request):
settings = request.registry.settings
dbsession = settings[CONFIG_DBSESSION]
config = settings[CONFIG_MODELS]
models = models_of_config(config)
resources = resources_of_config(config)
for table in models:
if not hasattr(table, 'slug'):
continue
node = dbsession.query(table).filter(table.slug == '/').first()
if node:
return resource_of_node(resources, node)(node)
raise HTTPNotFound
def register_views(*args):
""" Registration view for each resource from config.
"""
config = args[0]
settings = config.get_settings()
pages_config = settings[CONFIG_MODELS]
resources = resources_of_config(pages_config)
for resource in resources:
if hasattr(resource, '__table__')\
and not hasattr(resource, 'model'):
continue
resource.model.pyramid_pages_template = resource.template
config.add_view(resource.view,
attr=resource.attr,
route_name=PREFIX_PAGE,
renderer=resource.template,
context=resource,
permission=PREFIX_PAGE)
def includeme(config):
config.add_subscriber(add_globals, BeforeRender)
# Home page factory
config.add_route(HOME_PAGE, '/', factory=home_page_factory)
config.add_view(BasePageResource.view,
attr=BasePageResource.attr,
route_name=HOME_PAGE,
renderer=BasePageResource.template,
context=BasePageResource,
permission=HOME_PAGE)
# Default page factory
config.add_route(PREFIX_PAGE, '/{prefix}*traverse', factory=page_factory)
config.add_view(BasePageResource.view,
attr=BasePageResource.attr,
route_name=PREFIX_PAGE,
renderer=BasePageResource.template,
context=BasePageResource,
permission=PREFIX_PAGE)
import pkg_resources
pyramid_version = pkg_resources.get_distribution("pyramid").parsed_version
# if pyramid_version >= pkg_resources.SetuptoolsVersion('1.6a1'):
# Allow you to change settings after including this function. This
# fuature works only in version 1.6 or above.
config.action('pyramid_pages_routes', register_views, args=(config, ))
# else:
# config.include(register_views)
| {
"content_hash": "55b4618249aa8ab95aa9086d7318187e",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 79,
"avg_line_length": 32.28666666666667,
"alnum_prop": 0.6029320669006814,
"repo_name": "ITCase/pyramid_pages",
"id": "044c3225917badd41ca01990734702f0468df45b",
"size": "5006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyramid_pages/routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109382"
},
{
"name": "JavaScript",
"bytes": "13099"
},
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "39013"
},
{
"name": "Shell",
"bytes": "572"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "qipr_approver.settings")
from django.core.management import execute_from_command_line
import django
django.setup()
from approver.signals.bridge.all_signals import disconnect_signals
    # Guard against running "python manage.py" with no subcommand.
    command = sys.argv[1] if len(sys.argv) > 1 else None
    if command == 'loadmesh':
        disconnect_signals()
        from approver.custom_commands import loadmesh
        loadmesh(sys.argv)
    elif command == 'loadcontacts':
        disconnect_signals()
        from approver.custom_commands import loadcontacts
        loadcontacts(sys.argv)
    elif command == 'loadprojects':
        disconnect_signals()
        from approver.custom_commands import loadprojects
        loadprojects(sys.argv)
    elif command == 'dedupemesh':
        disconnect_signals()
        from approver.custom_commands import dedupemesh
        dedupemesh(sys.argv)
    else:
        execute_from_command_line(sys.argv)
| {
"content_hash": "b19cae24ca7ef1f0ded052ed991db625",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 31.29032258064516,
"alnum_prop": 0.668041237113402,
"repo_name": "DevMattM/qipr_approver",
"id": "00ee1d92c103ea982e3e199914b825f8e45de433",
"size": "992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qipr_approver/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "201040"
},
{
"name": "HTML",
"bytes": "81821"
},
{
"name": "JavaScript",
"bytes": "304143"
},
{
"name": "Python",
"bytes": "244892"
},
{
"name": "Ruby",
"bytes": "809"
},
{
"name": "Shell",
"bytes": "5372"
},
{
"name": "Vim script",
"bytes": "1716"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import logging
import numpy
from six import text_type, string_types
logger = logging.getLogger()
class Vocabulary(object):
"""Class that holds a vocabulary for the dataset."""
BOS = '<bos>' # beginning-of-sequence
EOS = '<eos>' # end-of-sequence
BOD = '<bod>' # beginning-of-definition
EOD = '<eod>' # end-of-definition
UNK = '<unk>' # unknown token
SPECIAL_TOKEN_MAP = {
BOS: 'bos',
EOS: 'eos',
BOD: 'bod',
EOD: 'eod',
UNK: 'unk'
}
def __init__(self, path_or_data):
"""Initialize the vocabulary.
path_or_data
Either a list of words or the path to it.
top_k
If not `None`, only the first `top_k` entries will be left.
Note, this does not include the special tokens.
"""
if isinstance(path_or_data, string_types):
words_and_freqs = []
with open(path_or_data) as f:
for line in f:
word, freq_str = line.strip().split()
word = word.decode('utf-8')
freq = int(freq_str)
words_and_freqs.append((word, freq))
else:
words_and_freqs = path_or_data
self._id_to_word = []
self._id_to_freq = []
self._word_to_id = {}
self.bos = self.eos = -1
self.bod = self.eod = -1
self.unk = -1
for idx, (word_name, freq) in enumerate(words_and_freqs):
token_attr = self.SPECIAL_TOKEN_MAP.get(word_name)
if token_attr is not None:
setattr(self, token_attr, idx)
self._id_to_word.append(word_name)
self._id_to_freq.append(freq)
self._word_to_id[word_name] = idx
if -1 in [getattr(self, attr)
for attr in self.SPECIAL_TOKEN_MAP.values()]:
raise ValueError("special token not found in the vocabulary")
def size(self):
return len(self._id_to_word)
@property
def words(self):
return self._id_to_word
@property
def frequencies(self):
return self._id_to_freq
    def word_to_id(self, word, top_k=None):
        id_ = self._word_to_id.get(word)
        # Parenthesise so an unknown word (id_ is None) is never compared to top_k.
        if id_ is not None and (not top_k or id_ < top_k):
            return id_
        return self.unk
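    # E.g. (sketch) with top_k=10000, any word whose id is >= 10000 -- or any
    # out-of-vocabulary word -- maps to the <unk> id.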
def id_to_word(self, cur_id):
return self._id_to_word[cur_id]
def word_freq(self, word):
        if word not in self._word_to_id:
return 0
return self._id_to_freq[self._word_to_id[word]]
def decode(self, cur_ids):
return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
def encode(self, sentence):
word_ids = [self.word_to_id(cur_word) for cur_word in sentence]
return numpy.array(word_ids, dtype=numpy.int64)
@staticmethod
def build(text, top_k=None, sort_by='frequency'):
"""
sort_by is either 'frequency' or 'lexicographical'
"""
# For now let's use a very stupid tokenization
if isinstance(text, str):
with open(text) as file_:
def data():
for line in file_:
for word in line.strip().split():
yield word
counter = Counter(data())
logger.info("Data is read")
else:
counter = Counter(text)
for word in list(counter.keys()):
if ' ' in word:
logger.error("can't have tokens with spaces, skip {}".format(word))
del counter[word]
# It was not immediately clear to me
# if counter.most_common() selects consistenly among
# the words with the same counts. Hence, let's just sort.
if sort_by == 'frequency':
sortf = lambda x: (-x[1], x[0])
elif sort_by == 'lexicographical':
sortf = lambda x: (x[0], x[1])
else:
raise Exception("sort not understood:", sort_by)
words_and_freqs = sorted(counter.items(), key=sortf)
logger.info("Words are sorted")
if top_k:
words_and_freqs = words_and_freqs[:top_k]
words_and_freqs = (
[(Vocabulary.BOS, 0),
(Vocabulary.EOS, 0),
(Vocabulary.BOD, 0),
(Vocabulary.EOD, 0),
(Vocabulary.UNK, 0)]
+ words_and_freqs)
return Vocabulary(words_and_freqs)
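    # Illustrative round trip (a sketch; exact ids depend on the sorting above):
    #
    #     vocab = Vocabulary.build(["the", "cat", "sat", "the"], top_k=10000)
    #     ids = vocab.encode(["the", "cat"])
    #     assert vocab.decode(ids) == "the cat"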
def save(self, filename):
with open(filename, 'w') as f:
for word, freq in zip(self._id_to_word, self._id_to_freq):
# Note: if this fails for you make sure that words read
# and used by Vocabulary were utf-8 encoded prior to that
if not isinstance(word, text_type):
word = text_type(word, "utf-8")
print(word.encode('utf-8'), freq, file=f)
| {
"content_hash": "5733438c1bd3fd11fe110d5281585d4f",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 87,
"avg_line_length": 33.08496732026144,
"alnum_prop": 0.5331884630580798,
"repo_name": "tombosc/dict_based_learning",
"id": "f5a43b16537780477e679300a867035715befcab",
"size": "5062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictlearn/vocab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "178706"
},
{
"name": "Python",
"bytes": "468785"
},
{
"name": "Shell",
"bytes": "7802"
}
],
"symlink_target": ""
} |
"""Base classes for channels."""
from __future__ import annotations
import asyncio
from enum import Enum
from functools import partialmethod, wraps
import logging
from typing import TYPE_CHECKING, Any, TypedDict
import zigpy.exceptions
import zigpy.zcl
from zigpy.zcl.foundation import (
CommandSchema,
ConfigureReportingResponseRecord,
Status,
ZCLAttributeDef,
)
from homeassistant.const import ATTR_COMMAND
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from ..const import (
ATTR_ARGS,
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_CLUSTER_ID,
ATTR_PARAMS,
ATTR_TYPE,
ATTR_UNIQUE_ID,
ATTR_VALUE,
CHANNEL_ZDO,
REPORT_CONFIG_ATTR_PER_REQ,
SIGNAL_ATTR_UPDATED,
ZHA_CHANNEL_MSG,
ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_CFG_RPT,
ZHA_CHANNEL_MSG_DATA,
ZHA_CHANNEL_READS_PER_REQ,
)
from ..helpers import LogMixin, retryable_req, safe_read
if TYPE_CHECKING:
from . import ChannelPool
_LOGGER = logging.getLogger(__name__)
class AttrReportConfig(TypedDict, total=True):
"""Configuration to report for the attributes."""
# Could be either an attribute name or attribute id
attr: str | int
# The config for the attribute reporting configuration consists of a tuple for
# (minimum_reported_time_interval_s, maximum_reported_time_interval_s, value_delta)
config: tuple[int, int, int | float]
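# A hypothetical reporting config for an on/off channel might look like:
#
#     REPORT_CONFIG = (
#         AttrReportConfig(attr="on_off", config=(1, 600, 1)),
#     )
#
# i.e. report the "on_off" attribute at most every 1 s, at least every 600 s,
# and on any change of 1 (a sketch; real channels define their own values).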
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
try:
result = await command(*args, **kwds)
channel.debug(
"executed '%s' command with args: '%s' kwargs: '%s' result: %s",
command.__name__,
args,
kwds,
result,
)
return result
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
channel.debug(
"command failed: '%s' args: '%s' kwargs '%s' exception: '%s'",
command.__name__,
args,
kwds,
str(ex),
)
return ex
return wrapper
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
REPORT_CONFIG: tuple[AttrReportConfig, ...] = ()
BIND: bool = True
# Dict of attributes to read on channel initialization.
# Dict keys -- attribute ID or names, with bool value indicating whether a cached
# attribute read is acceptable.
ZCL_INIT_ATTRS: dict[int | str, bool] = {}
def __init__(self, cluster: zigpy.zcl.Cluster, ch_pool: ChannelPool) -> None:
"""Initialize ZigbeeChannel."""
self._generic_id = f"channel_0x{cluster.cluster_id:04x}"
self._ch_pool = ch_pool
self._cluster = cluster
self._id = f"{ch_pool.id}:0x{cluster.cluster_id:04x}"
unique_id = ch_pool.unique_id.replace("-", ":")
self._unique_id = f"{unique_id}:0x{cluster.cluster_id:04x}"
if not hasattr(self, "_value_attribute") and self.REPORT_CONFIG:
attr = self.REPORT_CONFIG[0].get("attr")
if isinstance(attr, str):
attribute: ZCLAttributeDef = self.cluster.attributes_by_name.get(attr)
if attribute is not None:
self.value_attribute = attribute.id
else:
self.value_attribute = None
else:
self.value_attribute = attr
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
self.data_cache: dict[str, Enum] = {}
@property
def id(self) -> str:
"""Return channel id unique for this device only."""
return self._id
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def name(self) -> str:
"""Return friendly name."""
return self.cluster.ep_attribute or self._generic_id
@property
def status(self):
"""Return the status of the channel."""
return self._status
def __hash__(self) -> int:
"""Make this a hashable."""
return hash(self._unique_id)
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._ch_pool.async_send_signal(signal, *args)
async def bind(self):
"""Bind a zigbee cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"success": res[0] == 0,
},
},
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"success": False,
},
},
)
async def configure_reporting(self) -> None:
"""Configure attribute reporting for a cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
event_data = {}
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
for attr_report in self.REPORT_CONFIG:
attr, config = attr_report["attr"], attr_report["config"]
attr_name = self.cluster.attributes.get(attr, [attr])[0]
event_data[attr_name] = {
"min": config[0],
"max": config[1],
"id": attr,
"name": attr_name,
"change": config[2],
"success": False,
}
to_configure = [*self.REPORT_CONFIG]
chunk, rest = (
to_configure[:REPORT_CONFIG_ATTR_PER_REQ],
to_configure[REPORT_CONFIG_ATTR_PER_REQ:],
)
while chunk:
reports = {rec["attr"]: rec["config"] for rec in chunk}
try:
res = await self.cluster.configure_reporting_multiple(reports, **kwargs)
self._configure_reporting_status(reports, res[0])
# if we get a response, then it's a success
for attr_stat in event_data.values():
attr_stat["success"] = True
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting on '%s' cluster for: %s",
self.cluster.ep_attribute,
str(ex),
)
break
chunk, rest = (
rest[:REPORT_CONFIG_ATTR_PER_REQ],
rest[REPORT_CONFIG_ATTR_PER_REQ:],
)
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_CFG_RPT,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"attributes": event_data,
},
},
)
def _configure_reporting_status(
self, attrs: dict[int | str, tuple[int, int, float | int]], res: list | tuple
) -> None:
"""Parse configure reporting result."""
if isinstance(res, (Exception, ConfigureReportingResponseRecord)):
# assume default response
self.debug(
"attr reporting for '%s' on '%s': %s",
attrs,
self.name,
res,
)
return
if res[0].status == Status.SUCCESS and len(res) == 1:
self.debug(
"Successfully configured reporting for '%s' on '%s' cluster: %s",
attrs,
self.name,
res,
)
return
failed = [
self.cluster.attributes.get(r.attrid, [r.attrid])[0]
for r in res
if r.status != Status.SUCCESS
]
attributes = {self.cluster.attributes.get(r, [r])[0] for r in attrs}
self.debug(
"Successfully configured reporting for '%s' on '%s' cluster",
attributes - set(failed),
self.name,
)
self.debug(
"Failed to configure reporting for '%s' on '%s' cluster: %s",
failed,
self.name,
res,
)
async def async_configure(self) -> None:
"""Set cluster binding and attribute reporting."""
if not self._ch_pool.skip_configuration:
if self.BIND:
self.debug("Performing cluster binding")
await self.bind()
if self.cluster.is_server:
self.debug("Configuring cluster attribute reporting")
await self.configure_reporting()
ch_specific_cfg = getattr(self, "async_configure_channel_specific", None)
if ch_specific_cfg:
self.debug("Performing channel specific configuration")
await ch_specific_cfg()
self.debug("finished channel configuration")
else:
self.debug("skipping channel configuration")
self._status = ChannelStatus.CONFIGURED
@retryable_req(delays=(1, 1, 3))
async def async_initialize(self, from_cache: bool) -> None:
"""Initialize channel."""
if not from_cache and self._ch_pool.skip_configuration:
self.debug("Skipping channel initialization")
self._status = ChannelStatus.INITIALIZED
return
self.debug("initializing channel: from_cache: %s", from_cache)
cached = [a for a, cached in self.ZCL_INIT_ATTRS.items() if cached]
uncached = [a for a, cached in self.ZCL_INIT_ATTRS.items() if not cached]
uncached.extend([cfg["attr"] for cfg in self.REPORT_CONFIG])
if cached:
self.debug("initializing cached channel attributes: %s", cached)
await self._get_attributes(
True, cached, from_cache=True, only_cache=from_cache
)
if uncached:
self.debug(
"initializing uncached channel attributes: %s - from cache[%s]",
uncached,
from_cache,
)
await self._get_attributes(
True, uncached, from_cache=from_cache, only_cache=from_cache
)
ch_specific_init = getattr(self, "async_initialize_channel_specific", None)
if ch_specific_init:
self.debug("Performing channel specific initialization: %s", uncached)
await ch_specific_init(from_cache=from_cache)
self.debug("finished channel initialization")
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self._get_attribute_name(attrid),
value,
)
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
@callback
def zha_send_event(self, command: str, arg: list | dict | CommandSchema) -> None:
"""Relay events to hass."""
args: list | dict
if isinstance(arg, CommandSchema):
args = [a for a in arg if a is not None]
params = arg.as_dict()
elif isinstance(arg, (list, dict)):
# Quirks can directly send lists and dicts to ZHA this way
args = arg
params = {}
else:
raise TypeError(f"Unexpected zha_send_event {command!r} argument: {arg!r}")
self._ch_pool.zha_send_event(
{
ATTR_UNIQUE_ID: self.unique_id,
ATTR_CLUSTER_ID: self.cluster.cluster_id,
ATTR_COMMAND: command,
# Maintain backwards compatibility with the old zigpy response format
ATTR_ARGS: args,
ATTR_PARAMS: params,
}
)
async def async_update(self):
"""Retrieve latest state from cluster."""
def _get_attribute_name(self, attrid: int) -> str | int:
if attrid not in self.cluster.attributes:
return attrid
return self.cluster.attributes[attrid].name
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache,
manufacturer=manufacturer,
)
return result.get(attribute)
async def _get_attributes(
self,
raise_exceptions: bool,
attributes: list[int | str],
from_cache: bool = True,
only_cache: bool = True,
) -> dict[int | str, Any]:
"""Get the values for a list of attributes."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
chunk = attributes[:ZHA_CHANNEL_READS_PER_REQ]
rest = attributes[ZHA_CHANNEL_READS_PER_REQ:]
result = {}
while chunk:
try:
self.debug("Reading attributes in chunks: %s", chunk)
read, _ = await self.cluster.read_attributes(
chunk,
allow_cache=from_cache,
only_cache=only_cache,
manufacturer=manufacturer,
)
result.update(read)
except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException) as ex:
self.debug(
"failed to get attributes '%s' on '%s' cluster: %s",
chunk,
self.cluster.ep_attribute,
str(ex),
)
if raise_exceptions:
raise
chunk = rest[:ZHA_CHANNEL_READS_PER_REQ]
rest = rest[ZHA_CHANNEL_READS_PER_REQ:]
return result
get_attributes = partialmethod(_get_attributes, False)
def log(self, level, msg, *args, **kwargs):
"""Log a message."""
msg = f"[%s:%s]: {msg}"
args = (self._ch_pool.nwk, self._id) + args
_LOGGER.log(level, msg, *args, **kwargs)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(self, command)
return self.__getattribute__(name)
class ZDOChannel(LogMixin):
"""Channel for ZDO events."""
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = CHANNEL_ZDO
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = f"{str(device.ieee)}:{device.name}_ZDO"
self._cluster.add_listener(self)
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
@callback
def permit_duration(self, duration):
"""Permit handler."""
async def async_initialize(self, from_cache):
"""Initialize channel."""
self._status = ChannelStatus.INITIALIZED
async def async_configure(self):
"""Configure channel."""
self._status = ChannelStatus.CONFIGURED
def log(self, level, msg, *args, **kwargs):
"""Log a message."""
msg = f"[%s:ZDO](%s): {msg}"
args = (self._zha_device.nwk, self._zha_device.model) + args
_LOGGER.log(level, msg, *args, **kwargs)
class ClientChannel(ZigbeeChannel):
"""Channel listener for Zigbee client (output) clusters."""
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
try:
attr_name = self._cluster.attributes[attrid].name
except KeyError:
attr_name = "Unknown"
self.zha_send_event(
SIGNAL_ATTR_UPDATED,
{
ATTR_ATTRIBUTE_ID: attrid,
ATTR_ATTRIBUTE_NAME: attr_name,
ATTR_VALUE: value,
},
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.server_commands is not None
and self._cluster.server_commands.get(command_id) is not None
):
self.zha_send_event(self._cluster.server_commands[command_id].name, args)
| {
"content_hash": "0f37a118666a580bfe5f54a098a97c7f",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 88,
"avg_line_length": 33.84347826086957,
"alnum_prop": 0.5539568345323741,
"repo_name": "nkgilley/home-assistant",
"id": "ae5980cd63063ff431695e25f732784b4a1ff6bb",
"size": "19460",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/channels/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
ALL_EVENT_TYPES = [
"AddTeam",
"UnAcknowledge",
"AddNote",
"TestAction",
"Close",
"Escalate",
"AddRecipient",
"RemoveTags",
"Acknowledge",
"Delete",
"AddTags",
"TakeOwnership",
"Create",
"AssignOwnership",
]
@webhook_view("Opsgenie", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_opsgenie_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
# construct the body of the message
info = {
"additional_info": "",
"alert_type": payload["action"],
"alert_id": payload["alert"]["alertId"],
"integration_name": payload["integrationName"],
"tags": ", ".join("`" + tag + "`" for tag in payload["alert"].get("tags", [])),
}
topic = info["integration_name"]
bullet_template = "* **{key}**: {value}\n"
if "note" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Note",
value=payload["alert"]["note"],
)
if "recipient" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Recipient",
value=payload["alert"]["recipient"],
)
if "addedTags" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Tags added",
value=payload["alert"]["addedTags"],
)
if "team" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Team added",
value=payload["alert"]["team"],
)
if "owner" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Assigned owner",
value=payload["alert"]["owner"],
)
if "escalationName" in payload:
info["additional_info"] += bullet_template.format(
key="Escalation",
value=payload["escalationName"],
)
if "removedTags" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Tags removed",
value=payload["alert"]["removedTags"],
)
if "message" in payload["alert"]:
info["additional_info"] += bullet_template.format(
key="Message",
value=payload["alert"]["message"],
)
if info["tags"]:
info["additional_info"] += bullet_template.format(
key="Tags",
value=info["tags"],
)
body_template = """
[Opsgenie alert for {integration_name}](https://app.opsgenie.com/alert/V2#/show/{alert_id}):
* **Type**: {alert_type}
{additional_info}
""".strip()
body = body_template.format(**info)
check_send_webhook_message(request, user_profile, topic, body, info["alert_type"])
return json_success(request)
| {
"content_hash": "2ada3ae6f3a56b225b029fba40c4dde3",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 92,
"avg_line_length": 30.71153846153846,
"alnum_prop": 0.5892298058860364,
"repo_name": "kou/zulip",
"id": "886a1f143982825c388aa95bed11fb7caeafc5ba",
"size": "3194",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/webhooks/opsgenie/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433376"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "635452"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3361648"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79932"
},
{
"name": "Python",
"bytes": "8142846"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "134587"
},
{
"name": "TypeScript",
"bytes": "20233"
}
],
"symlink_target": ""
} |
import sys
try:
str_type = basestring
str_is_unicode = False
except NameError:
str_type = str
str_is_unicode = True
try:
unichr = unichr
except NameError:
unichr = chr
if str_is_unicode:
def native_str(s, errors=None):
return s
else:
def native_str(s, errors=None):
return s.encode(errors=errors)
if sys.version_info < (3, 0):
PY3 = False
else:
PY3 = True
| {
"content_hash": "00dd7c249df06c98dd1824777f5e7d1a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 38,
"avg_line_length": 16.64,
"alnum_prop": 0.6225961538461539,
"repo_name": "dittos/graphqllib",
"id": "54fce3d42370c32f86292d9de4a667886714884c",
"size": "416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphql/core/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "468582"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import time
import random
import tempfile
import subprocess
import shutil
import argparse
# params overwrite priority:
# for default:
# default_params < {blackbox,whitebox}_default_params < args
# for simple:
# default_params < {blackbox,whitebox}_default_params <
# simple_default_params <
# {blackbox,whitebox}_simple_default_params < args
# for cf_consistency:
# default_params < {blackbox,whitebox}_default_params <
# cf_consistency_params < args
# for txn:
# default_params < {blackbox,whitebox}_default_params < txn_params < args
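# Illustrative sketch only (not used by the test driver): the precedence above is
# just dict.update() layering in gen_cmd_params() below, so a later update always
# wins. With hypothetical values:
#
#     params = {}
#     params.update({"readpercent": 45})   # default_params
#     params.update({"readpercent": 50})   # simple_default_params overrides it
#     params.update({"readpercent": 25})   # an explicit command-line arg wins last
#     assert params["readpercent"] == 25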
expected_values_file = tempfile.NamedTemporaryFile()
default_params = {
"acquire_snapshot_one_in": 10000,
"block_size": 16384,
"bloom_bits": lambda: random.choice([random.randint(0,19),
random.lognormvariate(2.3, 1.3)]),
"cache_index_and_filter_blocks": lambda: random.randint(0, 1),
"cache_size": 1048576,
"checkpoint_one_in": 1000000,
"compression_type": lambda: random.choice(
["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress", "zstd"]),
"bottommost_compression_type": lambda:
"disable" if random.randint(0, 1) == 0 else
random.choice(
["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress",
"zstd"]),
"checksum_type" : lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64"]),
"compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
"compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
# Disabled compression_parallel_threads as the feature is not stable
# lambda: random.choice([1] * 9 + [4])
"compression_parallel_threads": 1,
"clear_column_family_one_in": 0,
"compact_files_one_in": 1000000,
"compact_range_one_in": 1000000,
"delpercent": 4,
"delrangepercent": 1,
"destroy_db_initially": 0,
"enable_pipelined_write": lambda: random.randint(0, 1),
"expected_values_path": expected_values_file.name,
"flush_one_in": 1000000,
"get_live_files_one_in": 1000000,
# Note: the following two are intentionally disabled as the corresponding
# APIs are not guaranteed to succeed.
"get_sorted_wal_files_one_in": 0,
"get_current_wal_file_one_in": 0,
# Temporarily disable hash index
"index_type": lambda: random.choice([0, 0, 0, 2, 2, 3]),
"max_background_compactions": 20,
"max_bytes_for_level_base": 10485760,
"max_key": 100000000,
"max_write_buffer_number": 3,
"mmap_read": lambda: random.randint(0, 1),
"nooverwritepercent": 1,
"open_files": lambda : random.choice([-1, -1, 100, 500000]),
"partition_filters": lambda: random.randint(0, 1),
"pause_background_one_in": 1000000,
"prefixpercent": 5,
"progress_reports": 0,
"readpercent": 45,
"recycle_log_file_num": lambda: random.randint(0, 1),
"reopen": 20,
"snapshot_hold_ops": 100000,
"long_running_snapshots": lambda: random.randint(0, 1),
"subcompactions": lambda: random.randint(1, 4),
"target_file_size_base": 2097152,
"target_file_size_multiplier": 2,
"use_direct_reads": lambda: random.randint(0, 1),
"use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
"mock_direct_io": False,
"use_full_merge_v1": lambda: random.randint(0, 1),
"use_merge": lambda: random.randint(0, 1),
"verify_checksum": 1,
"write_buffer_size": 4 * 1024 * 1024,
"writepercent": 35,
"format_version": lambda: random.choice([2, 3, 4, 5, 5]),
"index_block_restart_interval": lambda: random.choice(range(1, 16)),
"use_multiget" : lambda: random.randint(0, 1),
"periodic_compaction_seconds" :
lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
"compaction_ttl" : lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    # Test small max_manifest_file_size with a smaller probability, as most of the
    # time we want manifest history to be preserved to help debug
"max_manifest_file_size" : lambda : random.choice(
[t * 16384 if t < 3 else 1024 * 1024 * 1024 for t in range(1, 30)]),
    # Sync mode might make test runs slower, so only enable it with a smaller probability
"sync" : lambda : random.choice(
[1 if t == 0 else 0 for t in range(0, 20)]),
    # Disable compaction_readahead_size because the test is not passing.
#"compaction_readahead_size" : lambda : random.choice(
# [0, 0, 1024 * 1024]),
"db_write_buffer_size" : lambda: random.choice(
[0, 0, 0, 1024 * 1024, 8 * 1024 * 1024, 128 * 1024 * 1024]),
"avoid_unnecessary_blocking_io" : random.randint(0, 1),
"write_dbid_to_manifest" : random.randint(0, 1),
"avoid_flush_during_recovery" : random.choice(
[1 if t == 0 else 0 for t in range(0, 8)]),
"max_write_batch_group_size_bytes" : lambda: random.choice(
[16, 64, 1024 * 1024, 16 * 1024 * 1024]),
"level_compaction_dynamic_level_bytes" : True,
"verify_checksum_one_in": 1000000,
"verify_db_one_in": 100000,
"continuous_verification_interval" : 0,
"max_key_len": 3,
"key_len_percent_dist": "1,30,69",
"read_fault_one_in": lambda: random.choice([0, 1000]),
"sync_fault_injection": False
}
_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'
_DEBUG_LEVEL_ENV_VAR = 'DEBUG_LEVEL'
def is_release_mode():
return os.environ.get(_DEBUG_LEVEL_ENV_VAR) == "0"
def get_dbname(test_name):
test_dir_name = "rocksdb_crashtest_" + test_name
test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
if test_tmpdir is None or test_tmpdir == "":
dbname = tempfile.mkdtemp(prefix=test_dir_name)
else:
dbname = test_tmpdir + "/" + test_dir_name
shutil.rmtree(dbname, True)
os.mkdir(dbname)
return dbname
def is_direct_io_supported(dbname):
with tempfile.NamedTemporaryFile(dir=dbname) as f:
try:
os.open(f.name, os.O_DIRECT)
except BaseException:
return False
return True
blackbox_default_params = {
# total time for this script to test db_stress
"duration": 6000,
# time for one db_stress instance to run
"interval": 120,
# since we will be killing anyway, use large value for ops_per_thread
"ops_per_thread": 100000000,
"set_options_one_in": 10000,
"test_batches_snapshots": 1,
}
whitebox_default_params = {
"duration": 10000,
"log2_keys_per_lock": 10,
"ops_per_thread": 200000,
"random_kill_odd": 888887,
"test_batches_snapshots": lambda: random.randint(0, 1),
}
simple_default_params = {
"allow_concurrent_memtable_write": lambda: random.randint(0, 1),
"column_families": 1,
"max_background_compactions": 1,
"max_bytes_for_level_base": 67108864,
"memtablerep": "skip_list",
"prefixpercent": 0,
"readpercent": 50,
"prefix_size" : -1,
"target_file_size_base": 16777216,
"target_file_size_multiplier": 1,
"test_batches_snapshots": 0,
"write_buffer_size": 32 * 1024 * 1024,
"level_compaction_dynamic_level_bytes": False,
}
blackbox_simple_default_params = {
"open_files": -1,
"set_options_one_in": 0,
}
whitebox_simple_default_params = {}
cf_consistency_params = {
"disable_wal": lambda: random.randint(0, 1),
"reopen": 0,
"test_cf_consistency": 1,
# use small value for write_buffer_size so that RocksDB triggers flush
# more frequently
"write_buffer_size": 1024 * 1024,
"enable_pipelined_write": lambda: random.randint(0, 1),
}
txn_params = {
"use_txn" : 1,
# Avoid lambda to set it once for the entire test
"txn_write_policy": random.randint(0, 2),
"unordered_write": random.randint(0, 1),
"disable_wal": 0,
    # OpenReadOnly after checkpoint is not currently compatible with WritePrepared txns
"checkpoint_one_in": 0,
    # pipelined write is not currently compatible with WritePrepared txns
"enable_pipelined_write": 0,
}
def finalize_and_sanitize(src_params):
dest_params = dict([(k, v() if callable(v) else v)
for (k, v) in src_params.items()])
if dest_params.get("compression_type") != "zstd" or \
dest_params.get("compression_max_dict_bytes") == 0:
dest_params["compression_zstd_max_train_bytes"] = 0
if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
dest_params["memtablerep"] = "skip_list"
if dest_params["mmap_read"] == 1:
dest_params["use_direct_io_for_flush_and_compaction"] = 0
dest_params["use_direct_reads"] = 0
if (dest_params["use_direct_io_for_flush_and_compaction"] == 1
or dest_params["use_direct_reads"] == 1) and \
not is_direct_io_supported(dest_params["db"]):
if is_release_mode():
print("{} does not support direct IO".format(dest_params["db"]))
sys.exit(1)
else:
dest_params["mock_direct_io"] = True
    # DeleteRange is not currently compatible with Txns
if dest_params.get("test_batches_snapshots") == 1 or \
dest_params.get("use_txn") == 1:
dest_params["delpercent"] += dest_params["delrangepercent"]
dest_params["delrangepercent"] = 0
    # Only under WritePrepared txns, unordered_write would provide the same guarantees as vanilla rocksdb
if dest_params.get("unordered_write", 0) == 1:
dest_params["txn_write_policy"] = 1
dest_params["allow_concurrent_memtable_write"] = 1
if dest_params.get("disable_wal", 0) == 1:
dest_params["atomic_flush"] = 1
dest_params["sync"] = 0
if dest_params.get("open_files", 1) != -1:
# Compaction TTL and periodic compactions are only compatible
# with open_files = -1
dest_params["compaction_ttl"] = 0
dest_params["periodic_compaction_seconds"] = 0
if dest_params.get("compaction_style", 0) == 2:
# Disable compaction TTL in FIFO compaction, because right
# now assertion failures are triggered.
dest_params["compaction_ttl"] = 0
dest_params["periodic_compaction_seconds"] = 0
if dest_params["partition_filters"] == 1:
if dest_params["index_type"] != 2:
dest_params["partition_filters"] = 0
else:
dest_params["use_block_based_filter"] = 0
if dest_params.get("atomic_flush", 0) == 1:
# disable pipelined write when atomic flush is used.
dest_params["enable_pipelined_write"] = 0
return dest_params
def gen_cmd_params(args):
params = {}
params.update(default_params)
if args.test_type == 'blackbox':
params.update(blackbox_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_default_params)
if args.simple:
params.update(simple_default_params)
if args.test_type == 'blackbox':
params.update(blackbox_simple_default_params)
if args.test_type == 'whitebox':
params.update(whitebox_simple_default_params)
if args.cf_consistency:
params.update(cf_consistency_params)
if args.txn:
params.update(txn_params)
for k, v in vars(args).items():
if v is not None:
params[k] = v
return params
def gen_cmd(params, unknown_params):
    finalized_params = finalize_and_sanitize(params)
    cmd = ['./db_stress'] + [
        '--{0}={1}'.format(k, v)
        for k, v in [(k, finalized_params[k]) for k in sorted(finalized_params)]
if k not in set(['test_type', 'simple', 'duration', 'interval',
'random_kill_odd', 'cf_consistency', 'txn'])
and v is not None] + unknown_params
return cmd
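# For illustration only: after finalize_and_sanitize(), every surviving key/value
# pair becomes a '--key=value' flag in sorted key order, driver-only keys such as
# 'test_type', 'duration' and 'interval' are dropped, and unknown_params (extra
# command-line arguments) are appended verbatim, yielding roughly (hypothetical
# values):
#   ['./db_stress', '--db=/tmp/rocksdb_crashtest_blackbox', '--write_buffer_size=4194304', ...]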
# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('blackbox')
exit_time = time.time() + cmd_params['duration']
print("Running blackbox-crash-test with \n"
+ "interval_between_crash=" + str(cmd_params['interval']) + "\n"
+ "total-duration=" + str(cmd_params['duration']) + "\n")
while time.time() < exit_time:
run_had_errors = False
killtime = time.time() + cmd_params['interval']
cmd = gen_cmd(dict(
list(cmd_params.items())
+ list({'db': dbname}.items())), unknown_args)
child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
print("Running db_stress with pid=%d: %s\n\n"
% (child.pid, ' '.join(cmd)))
stop_early = False
while time.time() < killtime:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
stop_early = True
break
time.sleep(1)
if not stop_early:
if child.poll() is not None:
print("WARNING: db_stress ended before kill: exitcode=%d\n"
% child.returncode)
else:
child.kill()
print("KILLED %d\n" % child.pid)
time.sleep(1) # time to stabilize after a kill
while True:
line = child.stderr.readline().strip().decode('utf-8')
if line == '':
break
elif not line.startswith('WARNING'):
run_had_errors = True
print('stderr has error message:')
print('***' + line + '***')
if run_had_errors:
sys.exit(2)
time.sleep(1) # time to stabilize before the next run
# we need to clean up after ourselves -- only do this on test success
shutil.rmtree(dbname, True)
# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
cmd_params = gen_cmd_params(args)
dbname = get_dbname('whitebox')
cur_time = time.time()
exit_time = cur_time + cmd_params['duration']
half_time = cur_time + cmd_params['duration'] // 2
print("Running whitebox-crash-test with \n"
+ "total-duration=" + str(cmd_params['duration']) + "\n")
total_check_mode = 4
check_mode = 0
kill_random_test = cmd_params['random_kill_odd']
kill_mode = 0
while time.time() < exit_time:
if check_mode == 0:
additional_opts = {
# use large ops per thread since we will kill it anyway
"ops_per_thread": 100 * cmd_params['ops_per_thread'],
}
# run with kill_random_test, with three modes.
            # Mode 0 covers all kill points. Mode 1 covers fewer kill points but
            # increases the chance of triggering them. Mode 2 covers even less
            # frequent kill points and further increases the triggering chance.
if kill_mode == 0:
additional_opts.update({
"kill_random_test": kill_random_test,
})
elif kill_mode == 1:
if cmd_params.get('disable_wal', 0) == 1:
my_kill_odd = kill_random_test // 50 + 1
else:
my_kill_odd = kill_random_test // 10 + 1
additional_opts.update({
"kill_random_test": my_kill_odd,
"kill_prefix_blacklist": "WritableFileWriter::Append,"
+ "WritableFileWriter::WriteBuffered",
})
elif kill_mode == 2:
# TODO: May need to adjust random odds if kill_random_test
# is too small.
additional_opts.update({
"kill_random_test": (kill_random_test // 5000 + 1),
"kill_prefix_blacklist": "WritableFileWriter::Append,"
"WritableFileWriter::WriteBuffered,"
"PosixMmapFile::Allocate,WritableFileWriter::Flush",
})
# Run kill mode 0, 1 and 2 by turn.
kill_mode = (kill_mode + 1) % 3
elif check_mode == 1:
# normal run with universal compaction mode
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
"compaction_style": 1,
}
# Single level universal has a lot of special logic. Ensure we cover
# it sometimes.
if random.randint(0, 1) == 1:
additional_opts.update({
"num_levels": 1,
})
elif check_mode == 2:
# normal run with FIFO compaction mode
            # ops_per_thread is divided by 5 because FIFO compaction
            # style is quite a bit slower on reads with a lot of files
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'] // 5,
"compaction_style": 2,
}
else:
# normal run
additional_opts = {
"kill_random_test": None,
"ops_per_thread": cmd_params['ops_per_thread'],
}
cmd = gen_cmd(dict(list(cmd_params.items())
+ list(additional_opts.items())
+ list({'db': dbname}.items())), unknown_args)
print("Running:" + ' '.join(cmd) + "\n") # noqa: E999 T25377293 Grandfathered in
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
if stdoutdata:
stdoutdata = stdoutdata.decode('utf-8')
if stderrdata:
stderrdata = stderrdata.decode('utf-8')
retncode = popen.returncode
msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
check_mode, additional_opts['kill_random_test'], retncode))
print(msg)
print(stdoutdata)
expected = False
if additional_opts['kill_random_test'] is None and (retncode == 0):
# we expect zero retncode if no kill option
expected = True
elif additional_opts['kill_random_test'] is not None and retncode <= 0:
# When kill option is given, the test MIGHT kill itself.
# If it does, negative retncode is expected. Otherwise 0.
expected = True
if not expected:
print("TEST FAILED. See kill option and exit code above!!!\n")
sys.exit(1)
stdoutdata = stdoutdata.lower()
errorcount = (stdoutdata.count('error') -
stdoutdata.count('got errors 0 times'))
print("#times error occurred in output is " + str(errorcount) + "\n")
if (errorcount > 0):
print("TEST FAILED. Output has 'error'!!!\n")
sys.exit(2)
if (stdoutdata.find('fail') >= 0):
print("TEST FAILED. Output has 'fail'!!!\n")
sys.exit(2)
# First half of the duration, keep doing kill test. For the next half,
# try different modes.
if time.time() > half_time:
# we need to clean up after ourselves -- only do this on test
# success
shutil.rmtree(dbname, True)
os.mkdir(dbname)
cmd_params.pop('expected_values_path', None)
check_mode = (check_mode + 1) % total_check_mode
time.sleep(1) # time to stabilize after a kill
def main():
parser = argparse.ArgumentParser(description="This script runs and kills \
db_stress multiple times")
parser.add_argument("test_type", choices=["blackbox", "whitebox"])
parser.add_argument("--simple", action="store_true")
parser.add_argument("--cf_consistency", action='store_true')
parser.add_argument("--txn", action='store_true')
all_params = dict(list(default_params.items())
+ list(blackbox_default_params.items())
+ list(whitebox_default_params.items())
+ list(simple_default_params.items())
+ list(blackbox_simple_default_params.items())
+ list(whitebox_simple_default_params.items()))
for k, v in all_params.items():
parser.add_argument("--" + k, type=type(v() if callable(v) else v))
# unknown_args are passed directly to db_stress
args, unknown_args = parser.parse_known_args()
test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
if test_tmpdir is not None and not os.path.isdir(test_tmpdir):
print('%s env var is set to a non-existent directory: %s' %
(_TEST_DIR_ENV_VAR, test_tmpdir))
sys.exit(1)
if args.test_type == 'blackbox':
blackbox_crash_main(args, unknown_args)
if args.test_type == 'whitebox':
whitebox_crash_main(args, unknown_args)
if __name__ == '__main__':
main()
| {
"content_hash": "3cd17e7475dbfac815e42e662c71fa6c",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 105,
"avg_line_length": 39.27238805970149,
"alnum_prop": 0.5914964370546318,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "630b96b041c420ec9836bfd03b2f0211e72d1acf",
"size": "21144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packs_lib/rocksdb/rocksdb/tools/db_crashtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import djstripe
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dj-stripe'
copyright = u'2013, Daniel Greenfeld'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djstripe.__version__
# The full version, including alpha/beta/rc tags.
release = djstripe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dj-stripedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dj-stripe.tex', u'dj-stripe Documentation',
u'Daniel Greenfeld', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dj-stripe', u'dj-stripe Documentation',
[u'Daniel Greenfeld'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dj-stripe', u'dj-stripe Documentation',
u'Daniel Greenfeld', 'dj-stripe', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "9359c8dab0364e42b7f89612afa45888",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 31.966804979253112,
"alnum_prop": 0.7034008307372793,
"repo_name": "photocrowd/dj-stripe",
"id": "8aff80ac7235638bf1a7e66b27d5fcc704fb8276",
"size": "8125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "21071"
},
{
"name": "Python",
"bytes": "322011"
}
],
"symlink_target": ""
} |
import logging
import httplib as http
import math
from itertools import islice
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException, ValidationValueError
from framework import status
from framework.utils import iso8601format
from framework.mongo import StoredObject
from framework.flask import redirect
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import get_or_http_error
from website import language
from website.util import paths
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_contributor,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
http_error_if_disk_saving_mode
)
from website.tokens import process_token_or_pass
from website.util.permissions import ADMIN, READ, WRITE
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, get_pointer_parent, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.metadata.utils import serialize_meta_schemas
from website.models import Node, Pointer, WatchConfig, PrivateLink, Comment
from website import settings
from website.views import _render_nodes, find_dashboard, validate_page_num
from website.profile import utils
from website.project import new_folder
from website.project.licenses import serialize_node_license_record
from website.util.sanitize import strip_html
from website.util import rapply
r_strip_html = lambda collection: rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
post_data = request.json
edited_field = post_data.get('name')
value = post_data.get('value', '')
if edited_field == 'title':
try:
node.set_title(value, auth=auth)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
elif edited_field == 'description':
node.set_description(value, auth=auth)
node.save()
return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
user = auth.user
data = request.get_json()
title = strip_html(data.get('title'))
title = title.strip()
category = data.get('category', 'project')
template = data.get('template')
description = strip_html(data.get('description'))
new_project = {}
if template:
original_node = Node.load(template)
changes = {
'title': title,
'category': category,
'template_node': original_node,
}
if description:
changes['description'] = description
project = original_node.use_as_template(
auth=auth,
changes={
template: changes,
}
)
else:
try:
project = new_node(category, title, user, description)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
new_project = _view_project(project, auth)
return {
'projectUrl': project.url,
'newNode': new_project['node'] if new_project else None
}, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
new_node = node.use_as_template(
auth=auth,
changes=dict(),
)
return {'url': new_node.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_valid_project
@must_be_logged_in
def folder_new_post(auth, node, **kwargs):
user = auth.user
title = request.json.get('title')
if not node.is_folder:
raise HTTPError(http.BAD_REQUEST)
folder = new_folder(strip_html(title), user)
folders = [folder]
try:
_add_pointers(node, folders, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {
'projectUrl': '/dashboard/',
}, http.CREATED
@collect_auth
def add_folder(auth, **kwargs):
data = request.get_json()
node_id = data.get('node_id')
node = get_or_http_error(Node, node_id)
user = auth.user
title = strip_html(data.get('title'))
if not node.is_folder:
raise HTTPError(http.BAD_REQUEST)
folder = new_folder(
title, user
)
folders = [folder]
try:
_add_pointers(node, folders, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
form = NewNodeForm(request.form)
user = auth.user
if form.validate():
try:
new_component = new_node(
title=strip_html(form.title.data),
user=user,
category=form.category.data,
parent=node,
)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
redirect_url = node.url
message = (
'Your component was created successfully. You can keep working on the project page below, '
'or go to the new <u><a href={component_url}>component</a></u>.'
).format(component_url=new_component.url)
if form.inherit_contributors.data and node.has_permission(user, ADMIN):
for contributor in node.contributors:
new_component.add_contributor(contributor, permissions=node.get_permissions(contributor), auth=auth)
new_component.save()
redirect_url = new_component.url + 'contributors/'
message = (
'Your component was created successfully. You can edit the contributor permissions below, '
'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
'<a href="{project_url}">project page</a></u>.'
).format(component_url=new_component.url, project_url=node.url)
status.push_status_message(message, kind='info', trust=True)
return {
'status': 'success',
}, 201, None, redirect_url
else:
# TODO: This function doesn't seem to exist anymore?
status.push_errors_to_status(form.errors)
raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
user = auth.user
prompts = node.callback('before_fork', user=user)
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_FORK_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
prompts = []
for addon in node.get_addons():
if 'node' in addon.config.configs:
if addon.to_json(auth.user)['addon_full_name']:
prompts.append(addon.to_json(auth.user)['addon_full_name'])
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
@http_error_if_disk_saving_mode
def node_fork_page(auth, node, **kwargs):
try:
fork = node.fork_node(auth)
except PermissionsError:
raise HTTPError(
http.FORBIDDEN,
redirect_url=node.url
)
message = '{} has been successfully forked.'.format(
node.project_or_component.capitalize()
)
status.push_status_message(message, kind='success', trust=False)
return fork.url
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_registrations(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_forks(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_logged_in
@must_be_contributor
def node_setting(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
addons_enabled = []
addon_enabled_settings = []
for addon in node.get_addons():
addons_enabled.append(addon.config.short_name)
if 'node' in addon.config.configs:
config = addon.to_json(auth.user)
# inject the MakoTemplateLookup into the template context
# TODO inject only short_name and render fully client side
config['template_lookup'] = addon.config.template_lookup
config['addon_icon_url'] = addon.config.icon_url
addon_enabled_settings.append(config)
addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
ret['addon_categories'] = settings.ADDON_CATEGORIES
ret['addons_available'] = sorted([
addon
for addon in settings.ADDONS_AVAILABLE
if 'node' in addon.owners
and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name != 'wiki'
], key=lambda addon: addon.full_name.lower())
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.owners and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name == 'wiki':
ret['wiki'] = addon
break
ret['addons_enabled'] = addons_enabled
ret['addon_enabled_settings'] = addon_enabled_settings
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
ret['addon_js'] = collect_node_config_js(node.get_addons())
ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
ret['comments'] = {
'level': node.comment_level,
}
ret['categories'] = Node.CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret
def collect_node_config_js(addons):
"""Collect webpack bundles for each of the addons' node-cfg.js modules. Return
the URLs for each of the JS modules to be included on the node addons config page.
:param list addons: List of node's addon config records.
"""
js_modules = []
for addon in addons:
js_path = paths.resolve_addon_path(addon.config, 'node-cfg.js')
if js_path:
js_modules.append(js_path)
return js_modules
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['contributors'] = utils.serialize_contributors(node.contributors, node)
ret['adminContributors'] = utils.serialize_contributors(node.admin_contributors, node, admin=True)
return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
comment_level = request.json.get('commentLevel')
if not comment_level:
node.comment_level = None
elif comment_level in ['public', 'private']:
node.comment_level = comment_level
else:
raise HTTPError(http.BAD_REQUEST)
node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
@process_token_or_pass
def view_project(auth, node, **kwargs):
primary = '/api/v1' not in request.path
ret = _view_project(node, auth, primary=primary)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# Collect the URIs to the static assets for addons that have widgets
ret['addon_widget_js'] = list(collect_addon_js(
node,
filename='widget-cfg.js',
config_entry='widget'
))
ret.update(rubeus.collect_addon_assets(node))
return ret
# Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, node, **kwargs):
node.expand(user=auth.user)
return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, node, **kwargs):
node.collapse(user=auth.user)
return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
"""Reorders the components in a project's component list.
:param-json list new_list: List of strings that include node IDs and
node type delimited by ':'.
"""
# TODO(sloria): Change new_list parameter to be an array of objects
# {
# 'newList': {
# {'key': 'abc123', 'type': 'node'}
# }
# }
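    # A minimal sketch of the current wire format, with hypothetical ids (real
    # values come from the client): {"new_list": ["abc123:node", "fgh789:node"]}.
    # Each entry below is split on ':' into a (key, schema) pair, and the schema
    # name is used to look up the right collection.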
new_list = [
tuple(n.split(':'))
for n in request.json.get('new_list', [])
]
nodes_new = [
StoredObject.get_collection(schema).load(key)
for key, schema in new_list
]
valid_nodes = [
n for n in node.nodes
if not n.is_deleted
]
deleted_nodes = [
n for n in node.nodes
if n.is_deleted
]
if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
node.nodes = nodes_new + deleted_nodes
node.save()
return {}
logger.error('Got invalid node list in reorder components')
raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
if not (node.can_edit(auth) or node.is_public):
raise HTTPError(http.FORBIDDEN)
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics_redirect(auth, node, **kwargs):
return redirect(node.web_url_for("project_statistics", _guid=True))
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_before_set_public(node, **kwargs):
prompt = node.callback('before_make_public')
return {
'prompts': prompt
}
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
permissions = kwargs.get('permissions')
if permissions is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.set_privacy(permissions, auth)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.watch(watch_config)
except ValueError: # Node is already being watched
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched)
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.unwatch(watch_config)
except ValueError: # Node isn't being watched
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched)
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
'''View for toggling watch mode for a node.'''
# TODO: refactor this, watch_post, unwatch_post (@mambocab)
user = auth.user
watch_config = WatchConfig(
node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False)
)
try:
if user.is_watching(node):
user.unwatch(watch_config)
else:
user.watch(watch_config)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched),
'watched': user.is_watching(node)
}
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    # node.update() checks the node.WRITABLE_WHITELIST key list, so only
    # category, title, and description can be edited by a contributor with write permission
data = r_strip_html(request.get_json())
try:
updated_field_names = node.update(data, auth=auth)
except NodeUpdateError as e:
raise HTTPError(400, data=dict(
message_short="Failed to update attribute '{0}'".format(e.key),
message_long=e.reason
))
    # Need to cast tags to a string to make them JSON-serializable
updated_fields_dict = {
key: getattr(node, key) if key != 'tags' else [str(tag) for tag in node.tags]
for key in updated_field_names
if key != 'logs' and key != 'date_modified'
}
node.save()
return {'updated_fields': updated_fields_dict}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
"""Remove component, and recursively remove its children. If node has a
parent, add log and redirect to parent; else redirect to user dashboard.
"""
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + e.message
},
)
node.save()
message = '{} has been successfully deleted.'.format(
node.project_or_component.capitalize()
)
status.push_status_message(message, kind='success', trust=False)
parent = node.parent_node
if parent and parent.can_view(auth):
redirect_url = node.node__parent[0].url
else:
redirect_url = '/dashboard/'
return {
'url': redirect_url,
}
@must_have_permission(ADMIN)
@must_not_be_registration
def delete_folder(auth, node, **kwargs):
"""Remove folder node
"""
if node is None:
raise HTTPError(http.BAD_REQUEST)
if not node.is_folder or node.is_dashboard:
raise HTTPError(http.BAD_REQUEST)
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + e.message
},
)
return {}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
link_id = request.json['private_link_id']
try:
link = PrivateLink.load(link_id)
link.is_deleted = True
link.save()
except ModularOdmException:
raise HTTPError(http.NOT_FOUND)
# TODO: Split into separate functions
def _render_addon(node):
widgets = {}
configs = {}
js = []
css = []
for addon in node.get_addons():
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if not node.has_permission(user, 'write'):
return has_wiki and wiki_page and wiki_page.html(node)
else:
return has_wiki
def _view_project(node, auth, primary=False):
"""Build a JSON object containing everything needed to render
project.view.mako.
"""
user = auth.user
parent = node.parent_node
if user:
dashboard = find_dashboard(user)
dashboard_id = dashboard._id
in_dashboard = dashboard.pointing_at(node._primary_key) is not None
else:
in_dashboard = False
dashboard_id = ''
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
anonymous = has_anonymous_link(node, auth)
widgets, configs, js, css = _render_addon(node)
redirect_url = node.url + '?view_only=None'
# Before page load callback; skip if not primary call
if primary:
for addon in node.get_addons():
messages = addon.before_page_load(node, user) or []
for message in messages:
status.push_status_message(message, kind='info', dismissible=False, trust=True)
data = {
'node': {
'id': node._primary_key,
'title': node.title,
'category': node.category_display,
'category_short': node.category,
'node_type': node.project_or_component,
'description': node.description or '',
'license': serialize_node_license_record(node.license),
'url': node.url,
'api_url': node.api_url,
'absolute_url': node.absolute_url,
'redirect_url': redirect_url,
'display_absolute_url': node.display_absolute_url,
'update_url': node.api_url_for('update_node'),
'in_dashboard': in_dashboard,
'is_public': node.is_public,
'is_archiving': node.archiving,
'date_created': iso8601format(node.date_created),
'date_modified': iso8601format(node.logs[-1].date) if node.logs else '',
'tags': [tag._primary_key for tag in node.tags],
'children': bool(node.nodes_active),
'is_registration': node.is_registration,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'retracted_justification': getattr(node.retraction, 'justification', None),
'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'registered_from_url': node.registered_from.url if node.is_registration else '',
'registered_date': iso8601format(node.registered_date) if node.is_registration else '',
'root_id': node.root._id if node.root else None,
'registered_meta': node.registered_meta,
'registered_schemas': serialize_meta_schemas(node.registered_schema),
'registration_count': len(node.node__registrations),
'is_fork': node.is_fork,
'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
'fork_count': len(node.forks),
'templated_count': len(node.templated_list),
'watched_count': len(node.watchconfig__watched),
'private_links': [x.to_json() for x in node.private_links_active],
'link': view_only_link,
'anonymous': anonymous,
'points': len(node.get_points(deleted=False, folders=False)),
'piwik_site_id': node.piwik_site_id,
'comment_level': node.comment_level,
'has_comments': bool(Comment.find(Q('node', 'eq', node))),
'has_children': bool(Comment.find(Q('node', 'eq', node))),
'identifiers': {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
},
'institution': {
'name': node.primary_institution.name if node.primary_institution else None,
'logo_path': node.primary_institution.logo_path if node.primary_institution else None,
},
'alternative_citations': [citation.to_json() for citation in node.alternative_citations],
'has_draft_registrations': node.has_active_draft_registrations,
'contributors': [contributor._id for contributor in node.contributors]
},
'parent_node': {
'exists': parent is not None,
'id': parent._primary_key if parent else '',
'title': parent.title if parent else '',
'category': parent.category_display if parent else '',
'url': parent.url if parent else '',
'api_url': parent.api_url if parent else '',
'absolute_url': parent.absolute_url if parent else '',
'registrations_url': parent.web_url_for('node_registrations') if parent else '',
'is_public': parent.is_public if parent else '',
'is_contributor': parent.is_contributor(user) if parent else '',
'can_view': parent.can_view(auth) if parent else False
},
'user': {
'is_contributor': node.is_contributor(user),
'is_admin': node.has_permission(user, ADMIN),
'is_admin_parent': parent.is_admin_parent(user) if parent else False,
'can_edit': (node.can_edit(auth)
and not node.is_registration),
'has_read_permissions': node.has_permission(user, READ),
'permissions': node.get_permissions(user) if user else [],
'is_watching': user.is_watching(node) if user else False,
'piwik_token': user.piwik_token if user else '',
'id': user._id if user else None,
'username': user.username if user else None,
'fullname': user.fullname if user else '',
'can_comment': node.can_comment(auth),
'show_wiki_widget': _should_show_wiki_widget(node, user),
'dashboard_id': dashboard_id,
},
'badges': _get_badge(user),
# TODO: Namespace with nested dicts
'addons_enabled': node.get_addon_names(),
'addons': configs,
'addon_widgets': widgets,
'addon_widget_js': js,
'addon_widget_css': css,
'node_categories': Node.CATEGORY_MAP
}
return data
def _get_badge(user):
if user:
badger = user.get_addon('badges')
if badger:
return {
'can_award': badger.can_award,
'badges': badger.get_badges_json()
}
return {}
def _get_children(node, auth, indent=0):
children = []
for child in node.nodes_primary:
if not child.is_deleted and child.has_permission(auth.user, ADMIN):
children.append({
'id': child._primary_key,
'title': child.title,
'indent': indent,
'is_public': child.is_public,
'parent_id': child.parent_id,
})
children.extend(_get_children(child, auth, indent + 1))
return children
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
data = {
'node': {
'absolute_url': node.absolute_url,
'private_links': [x.to_json() for x in node.private_links_active],
}
}
return data
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
children = _get_children(node, auth)
return {
'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
'children': children,
}
@must_be_valid_project
def get_recent_logs(node, **kwargs):
logs = list(reversed(node.logs._to_primary_keys()))[:3]
return {'logs': logs}
def _get_summary(node, auth, primary=True, link_id=None, show_path=False):
# TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
summary = {
'id': link_id if link_id else node._id,
'primary': primary,
'is_registration': node.is_registration,
'is_fork': node.is_fork,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'archiving': node.archiving,
}
if node.can_view(auth):
summary.update({
'can_view': True,
'can_edit': node.can_edit(auth),
'primary_id': node._id,
'url': node.url,
'primary': primary,
'api_url': node.api_url,
'title': node.title,
'category': node.category,
'node_type': node.project_or_component,
'is_fork': node.is_fork,
'is_registration': node.is_registration,
'anonymous': has_anonymous_link(node, auth),
'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_registration
else None,
'forked_date': node.forked_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_fork
else None,
'ua_count': None,
'ua': None,
'non_ua': None,
'addons_enabled': node.get_addon_names(),
'is_public': node.is_public,
'parent_title': node.parent_node.title if node.parent_node else None,
'parent_is_public': node.parent_node.is_public if node.parent_node else False,
'show_path': show_path,
'nlogs': len(node.logs),
})
else:
summary['can_view'] = False
# TODO: Make output format consistent with _view_project
return {
'summary': summary,
}
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_summary(auth, node, **kwargs):
primary = kwargs.get('primary')
link_id = kwargs.get('link_id')
show_path = kwargs.get('show_path', False)
return _get_summary(
node, auth, primary=primary, link_id=link_id, show_path=show_path
)
@must_be_contributor_or_public
def get_children(auth, node, **kwargs):
user = auth.user
if request.args.get('permissions'):
perm = request.args['permissions'].lower().strip()
nodes = [
each
for each in node.nodes
if perm in each.get_permissions(user) and not each.is_deleted
]
else:
nodes = [
each
for each in node.nodes
if not each.is_deleted
]
return _render_nodes(nodes, auth)
def node_child_tree(user, node_ids):
""" Format data to test for node privacy settings for use in treebeard.
"""
items = []
for node_id in node_ids:
node = Node.load(node_id)
assert node, '{} is not a valid Node.'.format(node_id)
can_read = node.has_permission(user, 'read')
can_read_children = node.has_permission_on_children(user, 'read')
if not can_read and not can_read_children:
continue
contributors = []
for contributor in node.contributors:
contributors.append({
'id': contributor._id,
'is_admin': node.has_permission(contributor, ADMIN),
'is_confirmed': contributor.is_confirmed
})
children = []
# List project/node if user has at least 'read' permissions (contributor or admin viewer) or if
# user is contributor on a component of the project/node
can_write = node.has_permission(user, 'admin')
children.extend(node_child_tree(
user,
[
n._id
for n in node.nodes
if n.primary and
not n.is_deleted
]
))
item = {
'node': {
'id': node_id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
'is_public': node.is_public,
'can_write': can_write,
'contributors': contributors,
'visible_contributors': node.visible_contributor_ids,
'is_admin': node.has_permission(user, ADMIN)
},
'user_id': user._id,
'children': children,
'kind': 'folder' if not node.node__parent or not node.parent_node.has_permission(user, 'read') else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
}
}
items.append(item)
return items
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
node = kwargs.get('node') or kwargs['project']
tree = node_child_tree(auth.user, [node._id])
return tree
@must_be_contributor_or_public
def get_folder_pointers(auth, node, **kwargs):
if not node.is_folder:
return []
nodes = [
each.resolve()._id
for each in node.nodes
if each is not None and not each.is_deleted and not each.primary
]
return nodes
@must_be_contributor_or_public
def get_forks(auth, node, **kwargs):
fork_list = sorted(node.forks, key=lambda fork: fork.forked_date, reverse=True)
return _render_nodes(nodes=fork_list, auth=auth)
@must_be_contributor_or_public
def get_registrations(auth, node, **kwargs):
registrations = [n for n in reversed(node.node__registrations) if not n.is_deleted] # get all registrations, including archiving
return _render_nodes(registrations, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [Node.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
name = request.json.get('value', '')
try:
validate_title(name)
except ValidationValueError as e:
message = 'Invalid link name.' if e.message == 'Invalid title.' else e.message
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=message)
)
private_link_id = request.json.get('pk', '')
private_link = PrivateLink.load(private_link_id)
if private_link:
new_name = strip_html(name)
private_link.name = new_name
private_link.save()
return new_name
else:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='View-only link not found.')
)
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
title = node.title
if node.is_registration:
title += ' (registration)'
first_author = node.visible_contributors[0]
return {
'id': node._id,
'title': title,
'firstAuthor': first_author.family_name or first_author.given_name or first_author.full_name,
'etal': len(node.visible_contributors) > 1,
}
@must_be_logged_in
def search_node(auth, **kwargs):
"""
"""
# Get arguments
node = Node.load(request.json.get('nodeId'))
include_public = request.json.get('includePublic')
size = float(request.json.get('size', '5').strip())
page = request.json.get('page', 0)
query = request.json.get('query', '').strip()
start = (page * size)
if not query:
return {'nodes': []}
# Build ODM query
title_query = Q('title', 'icontains', query)
not_deleted_query = Q('is_deleted', 'eq', False)
visibility_query = Q('contributors', 'eq', auth.user)
no_folders_query = Q('is_folder', 'eq', False)
if include_public:
visibility_query = visibility_query | Q('is_public', 'eq', True)
odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
# Exclude current node from query if provided
if node:
nin = [node._id] + node.node_ids
odm_query = (
odm_query &
Q('_id', 'nin', nin)
)
nodes = Node.find(odm_query)
count = nodes.count()
pages = math.ceil(count / size)
validate_page_num(page, pages)
return {
'nodes': [
_serialize_node_search(each)
for each in islice(nodes, start, start + size)
if each.contributors
],
'total': count,
'pages': pages,
'page': page
}
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
"""Move pointer from one node to another node.
"""
from_node_id = request.json.get('fromNodeId')
to_node_id = request.json.get('toNodeId')
pointers_to_move = request.json.get('pointerIds')
if from_node_id is None or to_node_id is None or pointers_to_move is None:
raise HTTPError(http.BAD_REQUEST)
from_node = Node.load(from_node_id)
to_node = Node.load(to_node_id)
if to_node is None or from_node is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_to_move in pointers_to_move:
pointer_id = from_node.pointing_at(pointer_to_move)
pointer_node = Node.load(pointer_to_move)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
from_node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
from_node.save()
try:
_add_pointers(to_node, [pointer_node], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}, 200, None
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http.BAD_REQUEST)
pointer = Node.load(pointer_to_move)
to_node = Node.load(to_node_id)
try:
_add_pointers(to_node, [pointer], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http.BAD_REQUEST)
nodes = [
Node.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
    # TODO: since this is a delete request, we shouldn't use the request body.
    # Put the pointer id in the URL instead.
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointers_from_folder(auth, node, **kwargs):
"""Remove multiple pointers from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
pointer_ids = request.json.get('pointerIds')
if pointer_ids is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_id in pointer_ids:
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
"""
pointer_id = request.json.get('pointerId')
pointer = Pointer.load(pointer_id)
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http.BAD_REQUEST)
try:
node.fork_pointer(pointer, auth=auth, save=True)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
lead_author = node.visible_contributors[0]
ret = lead_author.family_name or lead_author.given_name or lead_author.fullname
if len(node.visible_contributor_ids) > 1:
ret += ' et al.'
return ret
def serialize_pointer(pointer, auth):
node = get_pointer_parent(pointer)
if node.can_view(auth):
return {
'id': node._id,
'url': node.url,
'title': node.title,
'authorShort': abbrev_authors(node),
}
return {
'url': None,
'title': 'Private Component',
'authorShort': 'Private Author(s)',
}
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
"""View that returns the pointers for a project."""
# exclude folders
return {'pointed': [
serialize_pointer(each, auth)
for each in node.pointed
if not get_pointer_parent(each).is_folder
]}
| {
"content_hash": "53fd9312381f40bf4d186ab323237ef0",
"timestamp": "",
"source": "github",
"line_count": 1436,
"max_line_length": 133,
"avg_line_length": 31.28899721448468,
"alnum_prop": 0.6028354588146269,
"repo_name": "billyhunt/osf.io",
"id": "3bf6f4a81035470ad7bc805fe6bd4c9d3bd08897",
"size": "44955",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/project/views/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133568"
},
{
"name": "HTML",
"bytes": "58491"
},
{
"name": "JavaScript",
"bytes": "1369885"
},
{
"name": "Mako",
"bytes": "619064"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4835804"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
} |
from pymatgen.util.testing import PymatgenTest
import unittest
import os
import warnings
from pymatgen.analysis.solar.slme import optics, slme
class SolarTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_slme_from_vasprun(self):
path = os.path.join(os.path.dirname(__file__), "vasprun.xml")
en, abz, dirgap, indirgap = optics(path)
abz = abz * 100.0
eff = slme(en, abz, indirgap, indirgap, plot_current_voltage=False)
self.assertAlmostEqual(eff, 27.728998512472298, places=5)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "cfaee72e23788fd3efbd47314a91c6ab",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.6699438202247191,
"repo_name": "mbkumar/pymatgen",
"id": "4e9b5396920af6b3a405fbff9035dc9090072ea3",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/solar/tests/test_slme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6933839"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.core.exceptions import (
ObjectDoesNotExist, MultipleObjectsReturned, PermissionDenied)
from django.core.urlresolvers import reverse
from oscar.core.loading import get_model
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import (ListView, CreateView, UpdateView, DeleteView,
View, FormView)
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.mixins import PageTitleMixin
from oscar.core.loading import get_classes
WishList = get_model('wishlists', 'WishList')
Line = get_model('wishlists', 'Line')
Product = get_model('catalogue', 'Product')
WishListForm, LineFormset = get_classes('wishlists.forms',
['WishListForm', 'LineFormset'])
class WishListListView(PageTitleMixin, ListView):
context_object_name = active_tab = "wishlists"
template_name = 'customer/wishlists/wishlists_list.html'
page_title = _('Wish Lists')
def get_queryset(self):
return self.request.user.wishlists.all()
class WishListDetailView(PageTitleMixin, FormView):
"""
This view acts as a DetailView for a wish list and allows updating the
quantities of products.
It is implemented as FormView because it's easier to adapt a FormView to
    display a product than to adapt a DetailView to handle form validation.
"""
template_name = 'customer/wishlists/wishlists_detail.html'
active_tab = "wishlists"
form_class = LineFormset
def dispatch(self, request, *args, **kwargs):
self.object = self.get_wishlist_or_404(kwargs['key'], request.user)
return super(WishListDetailView, self).dispatch(request, *args,
**kwargs)
def get_wishlist_or_404(self, key, user):
wishlist = get_object_or_404(WishList, key=key)
if wishlist.is_allowed_to_see(user):
return wishlist
else:
raise Http404
def get_page_title(self):
return self.object.name
def get_form_kwargs(self):
kwargs = super(WishListDetailView, self).get_form_kwargs()
kwargs['instance'] = self.object
return kwargs
def get_context_data(self, **kwargs):
ctx = super(WishListDetailView, self).get_context_data(**kwargs)
ctx['wishlist'] = self.object
other_wishlists = self.request.user.wishlists.exclude(
pk=self.object.pk)
ctx['other_wishlists'] = other_wishlists
return ctx
def form_valid(self, form):
for subform in form:
if subform.cleaned_data['quantity'] <= 0:
subform.instance.delete()
else:
subform.save()
messages.success(self.request, _('Quantities updated.'))
return HttpResponseRedirect(reverse('customer:wishlists-detail',
kwargs={'key': self.object.key}))
class WishListCreateView(PageTitleMixin, CreateView):
"""
Create a new wishlist
    If a product ID is passed as a kwarg, then this product will be added to
the wishlist.
"""
model = WishList
template_name = 'customer/wishlists/wishlists_form.html'
active_tab = "wishlists"
page_title = _('Create a new wish list')
form_class = WishListForm
product = None
def dispatch(self, request, *args, **kwargs):
if 'product_pk' in kwargs:
try:
self.product = Product.objects.get(pk=kwargs['product_pk'])
except ObjectDoesNotExist:
messages.error(
request, _("The requested product no longer exists"))
return HttpResponseRedirect(reverse('wishlists-create'))
return super(WishListCreateView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(WishListCreateView, self).get_context_data(**kwargs)
ctx['product'] = self.product
return ctx
def get_form_kwargs(self):
kwargs = super(WishListCreateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
wishlist = form.save()
if self.product:
wishlist.add(self.product)
msg = _("Your wishlist has been created and '%(name)s "
"has been added") \
% {'name': self.product.get_title()}
else:
msg = _("Your wishlist has been created")
messages.success(self.request, msg)
return HttpResponseRedirect(wishlist.get_absolute_url())
class WishListCreateWithProductView(View):
"""
Create a wish list and immediately add a product to it
"""
def post(self, request, *args, **kwargs):
product = get_object_or_404(Product, pk=kwargs['product_pk'])
wishlists = request.user.wishlists.all()
if len(wishlists) == 0:
wishlist = request.user.wishlists.create()
else:
# This shouldn't really happen but we default to using the first
# wishlist for a user if one already exists when they make this
# request.
wishlist = wishlists[0]
wishlist.add(product)
messages.success(
request, _("%(title)s has been added to your wishlist") % {
'title': product.get_title()})
return HttpResponseRedirect(request.META.get(
'HTTP_REFERER', wishlist.get_absolute_url()))
class WishListUpdateView(PageTitleMixin, UpdateView):
model = WishList
template_name = 'customer/wishlists/wishlists_form.html'
active_tab = "wishlists"
form_class = WishListForm
context_object_name = 'wishlist'
def get_page_title(self):
return self.object.name
def get_object(self, queryset=None):
return get_object_or_404(WishList, owner=self.request.user,
key=self.kwargs['key'])
def get_form_kwargs(self):
kwargs = super(WishListUpdateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_success_url(self):
messages.success(
self.request, _("Your '%s' wishlist has been updated")
% self.object.name)
return reverse('customer:wishlists-list')
class WishListDeleteView(PageTitleMixin, DeleteView):
model = WishList
template_name = 'customer/wishlists/wishlists_delete.html'
active_tab = "wishlists"
def get_page_title(self):
return _(u'Delete %s') % self.object.name
def get_object(self, queryset=None):
return get_object_or_404(WishList, owner=self.request.user,
key=self.kwargs['key'])
def get_success_url(self):
messages.success(
self.request, _("Your '%s' wish list has been deleted")
% self.object.name)
return reverse('customer:wishlists-list')
class WishListAddProduct(View):
"""
Adds a product to a wish list.
- If the user doesn't already have a wishlist then it will be created for
them.
- If the product is already in the wish list, its quantity is increased.
"""
def dispatch(self, request, *args, **kwargs):
self.product = get_object_or_404(Product, pk=kwargs['product_pk'])
self.wishlist = self.get_or_create_wishlist(request, *args, **kwargs)
return super(WishListAddProduct, self).dispatch(request)
def get_or_create_wishlist(self, request, *args, **kwargs):
wishlists = request.user.wishlists.all()
num_wishlists = len(wishlists)
if num_wishlists == 0:
return request.user.wishlists.create()
wishlist = wishlists[0]
if not wishlist.is_allowed_to_edit(request.user):
raise PermissionDenied
return wishlist
def get(self, request, *args, **kwargs):
# This is nasty as we shouldn't be performing write operations on a GET
# request. It's only included as the UI of the product detail page
# allows a wishlist to be selected from a dropdown.
return self.add_product()
def post(self, request, *args, **kwargs):
return self.add_product()
def add_product(self):
self.wishlist.add(self.product)
msg = _("'%s' was added to your wish list." % self.product.get_title())
messages.success(self.request, msg)
return HttpResponseRedirect(
self.request.META.get('HTTP_REFERER',
self.product.get_absolute_url()))
class LineMixin(object):
"""
Handles fetching both a wish list and a product
Views using this mixin must be passed two keyword arguments:
* key: The key of a wish list
* line_pk: The primary key of the wish list line
or
* product_pk: The primary key of the product
"""
def fetch_line(self, user, wishlist_key, line_pk=None, product_pk=None):
self.wishlist = WishList._default_manager.get(
owner=user, key=wishlist_key)
if line_pk is not None:
self.line = self.wishlist.lines.get(pk=line_pk)
else:
self.line = self.wishlist.lines.get(product_id=product_pk)
self.product = self.line.product
class WishListRemoveProduct(LineMixin, PageTitleMixin, DeleteView):
template_name = 'customer/wishlists/wishlists_delete_product.html'
active_tab = "wishlists"
def get_page_title(self):
return _(u'Remove %s') % self.object.get_title()
def get_object(self, queryset=None):
self.fetch_line(
self.request.user, self.kwargs['key'],
self.kwargs.get('line_pk'), self.kwargs.get('product_pk'))
return self.line
def get_context_data(self, **kwargs):
ctx = super(WishListRemoveProduct, self).get_context_data(**kwargs)
ctx['wishlist'] = self.wishlist
ctx['product'] = self.product
return ctx
def get_success_url(self):
msg = _("'%(title)s' was removed from your '%(name)s' wish list") % {
'title': self.line.get_title(),
'name': self.wishlist.name}
messages.success(self.request, msg)
# We post directly to this view on product pages; and should send the
# user back there if that was the case
referrer = self.request.META.get('HTTP_REFERER', '')
if self.product and self.product.get_absolute_url() in referrer:
return referrer
else:
return reverse(
'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
class WishListMoveProductToAnotherWishList(LineMixin, View):
def dispatch(self, request, *args, **kwargs):
try:
self.fetch_line(request.user, kwargs['key'],
product_pk=kwargs['product_pk'])
except (ObjectDoesNotExist, MultipleObjectsReturned):
raise Http404
return super(WishListMoveProductToAnotherWishList, self).dispatch(
request, *args, **kwargs)
def get(self, request, *args, **kwargs):
to_wishlist = get_object_or_404(
WishList, owner=request.user, key=kwargs['to_key'])
self.line.wishlist = to_wishlist
self.line.save()
msg = _("'%(title)s' moved to '%(name)s' wishlist") % {
'title': self.product.get_title(),
'name': to_wishlist.name}
messages.success(self.request, msg)
default_url = reverse(
'customer:wishlists-detail', kwargs={'key': self.wishlist.key})
return HttpResponseRedirect(self.request.META.get(
'HTTP_REFERER', default_url))
| {
"content_hash": "26a057f1d338f51e9f1d33ac5c1897c8",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 79,
"avg_line_length": 36.55727554179567,
"alnum_prop": 0.6236449864498645,
"repo_name": "MrReN/django-oscar",
"id": "06fbd529829fd92bb05aeb939e26f140e0ec3200",
"size": "11832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/customer/wishlists/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from wtforms import Form, TextAreaField, StringField, BooleanField, validators
class NLUForm(Form):
text = TextAreaField('text', default='', validators=[
validators.DataRequired("Text is required field")
])
entities = BooleanField('Extract entities', default=False)
keywords = BooleanField('Extract keywords', default=False)
relations = BooleanField('Extract relations', default=False)
semantic_roles = BooleanField('Extract semantic roles', default=False)
| {
"content_hash": "879a347c131a254e741f767488cc1fdd",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 44.63636363636363,
"alnum_prop": 0.7372708757637475,
"repo_name": "TokhirUmarov/causal-relation-app",
"id": "8363f25b3ac24c9dac8c930e5f664f66b398f16a",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/nlu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "263446"
},
{
"name": "HTML",
"bytes": "37790"
},
{
"name": "JavaScript",
"bytes": "858"
},
{
"name": "Python",
"bytes": "17813"
}
],
"symlink_target": ""
} |
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method are based on The Tab Nanny std module.
"""
import re
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
from logilab.common.textutils import pretty_match
from logilab.astng import nodes
from pylint.interfaces import IRawChecker, IASTNGChecker
from pylint.checkers import BaseRawChecker
MSGS = {
'C0301': ('Line too long (%s/%s)',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s)', # was W0302
'Used when a module has too much lines, reducing its readability.'
),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'Used when an unexpected number of indentation\'s tabulations or '
'spaces has been found.'),
'W0312': ('Found indentation with %ss instead of %ss',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'F0321': ('Format detection error in %r',
'Used when an unexpected error occurred in bad format detection.'
'Please report the error if it occurs.'),
'C0321': ('More than one statement on a single line',
'Used when more than on statement are found on the same line.'),
'C0322': ('Operator not preceded by a space\n%s',
'Used when one of the following operator (!= | <= | == | >= | < '
'| > | = | \+= | -= | \*= | /= | %) is not preceded by a space.'),
'C0323': ('Operator not followed by a space\n%s',
'Used when one of the following operator (!= | <= | == | >= | < '
'| > | = | \+= | -= | \*= | /= | %) is not followed by a space.'),
'C0324': ('Comma not followed by a space\n%s',
'Used when a comma (",") is not followed by a space.'),
'W0331': ('Use of the <> operator',
'Used when the deprecated "<>" operator is used instead \
of "!=".'),
'W0332': ('Use l as long integer identifier',
'Used when a lower case "l" is used to mark a long integer. You '
'should use a upper case "L" since the letter "l" looks too much '
'like the digit "1"'),
'W0333': ('Use of the `` operator',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.'),
}
# simple quoted string rgx
SQSTRING_RGX = r'"([^"\\]|\\.)*?"'
# simple apostrophed rgx
SASTRING_RGX = r"'([^'\\]|\\.)*?'"
# triple quoted string rgx
TQSTRING_RGX = r'"""([^"]|("(?!"")))*?(""")'
# triple apostrophed string rgx # FIXME english please
TASTRING_RGX = r"'''([^']|('(?!'')))*?(''')"
# finally, the string regular expression
STRING_RGX = re.compile('(%s)|(%s)|(%s)|(%s)' % (TQSTRING_RGX, TASTRING_RGX,
SQSTRING_RGX, SASTRING_RGX),
re.MULTILINE|re.DOTALL)
COMMENT_RGX = re.compile("#.*$", re.M)
OPERATORS = r'!=|<=|==|>=|<|>|=|\+=|-=|\*=|/=|%'
OP_RGX_MATCH_1 = r'[^(]*(?<!\s|\^|<|>|=|\+|-|\*|/|!|%%|&|\|)(%s).*' % OPERATORS
OP_RGX_SEARCH_1 = r'(?<!\s|\^|<|>|=|\+|-|\*|/|!|%%|&|\|)(%s)' % OPERATORS
OP_RGX_MATCH_2 = r'[^(]*(%s)(?!\s|=|>|<).*' % OPERATORS
OP_RGX_SEARCH_2 = r'(%s)(?!\s|=|>)' % OPERATORS
BAD_CONSTRUCT_RGXS = (
(re.compile(OP_RGX_MATCH_1, re.M),
re.compile(OP_RGX_SEARCH_1, re.M),
'C0322'),
(re.compile(OP_RGX_MATCH_2, re.M),
re.compile(OP_RGX_SEARCH_2, re.M),
'C0323'),
(re.compile(r'.*,[^\s)].*', re.M),
re.compile(r',[^\s)]', re.M),
'C0324'),
)
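# Illustrative examples (not part of the original checker) of source lines
# that the rules above would flag, assuming strings and comments have already
# been stripped by check_line():
#
#   "x=1"      -> C0322 (operator not preceded by a space)
#   "x =1"     -> C0323 (operator not followed by a space)
#   "f(a,b)"   -> C0324 (comma not followed by a space)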
def get_string_coords(line):
"""return a list of string positions (tuple (start, end)) in the line
"""
result = []
for match in re.finditer(STRING_RGX, line):
result.append( (match.start(), match.end()) )
return result
def in_coords(match, string_coords):
"""return true if the match is in the string coord"""
mstart = match.start()
for start, end in string_coords:
if mstart >= start and mstart < end:
return True
return False
def check_line(line, writer):
"""check a line for a bad construction
    if it finds one, return a message describing the problem
else return None
"""
cleanstr = COMMENT_RGX.sub('', STRING_RGX.sub('', line))
for rgx_match, rgx_search, msg_id in BAD_CONSTRUCT_RGXS:
if rgx_match.match(cleanstr):
string_positions = get_string_coords(line)
for match in re.finditer(rgx_search, line):
if not in_coords(match, string_positions):
return msg_id, pretty_match(match, line.rstrip())
#writer.add_message('F0321', line=line, args=line)
class FormatChecker(BaseRawChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
* use of <> instead of !=
"""
__implements__ = (IRawChecker, IASTNGChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 80, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually \
" " (4 spaces) or "\\t" (1 tab).'}),
)
def __init__(self, linter=None):
BaseRawChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
def new_line(self, tok_type, line, line_num, junk):
"""a new line has been encountered, process it if necessary"""
if not tok_type in junk:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
indent = tokenize.INDENT
dedent = tokenize.DEDENT
newline = tokenize.NEWLINE
junk = (tokenize.COMMENT, tokenize.NL)
indents = [0]
check_equal = 0
line_num = 0
previous = None
self._lines = {}
self._visited_lines = {}
for (tok_type, token, start, _, line) in tokens:
if start[0] != line_num:
if previous is not None and previous[0] == tokenize.OP and previous[1] == ';':
self.add_message('W0301', line=previous[2])
previous = None
line_num = start[0]
self.new_line(tok_type, line, line_num, junk)
if tok_type not in (indent, dedent, newline) + junk:
previous = tok_type, token, start[0]
if tok_type == tokenize.OP:
if token == '<>':
self.add_message('W0331', line=line_num)
elif tok_type == tokenize.NUMBER:
if token.endswith('l'):
self.add_message('W0332', line=line_num)
elif tok_type == newline:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif tok_type == indent:
check_equal = 0
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == dedent:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = 1
if len(indents) > 1:
del indents[-1]
elif check_equal and tok_type not in junk:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
self.check_indent_level(line, indents[-1], line_num)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
self.add_message('C0302', args=line_num, line=1)
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
# py2.5 try: except: finally:
if not (isinstance(node, nodes.TryExcept)
and isinstance(node.parent, nodes.TryFinally)
and node.fromlineno == node.parent.fromlineno):
self.add_message('C0321', node=node)
self._visited_lines[line] = 2
return
if self._visited_lines.has_key(line):
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in xrange(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
try:
msg_def = check_line('\n'.join(lines), self)
if msg_def:
self.add_message(msg_def[0], node=node, args=msg_def[1])
except KeyError:
# FIXME: internal error !
pass
def visit_backquote(self, node):
self.add_message('W0333', node=node)
def check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
for line in lines.splitlines():
if len(line) > max_chars:
self.add_message('C0301', line=i, args=(len(line), max_chars))
i += 1
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string
"""
indent = self.config.indent_string
if indent == '\\t': # \t is not interpreted in the configuration file
indent = '\t'
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ''
while string and string[0] in ' \t':
if string[0] != indent[0]:
if string[0] == '\t':
args = ('tab', 'space')
else:
args = ('space', 'tab')
self.add_message('W0312', args=args, line=line_num)
return level
suppl += string[0]
            string = string[1:]
if level != expected or suppl:
i_type = 'spaces'
if indent[0] == '\t':
i_type = 'tabs'
self.add_message('W0311', line=line_num,
args=(level * unit_size + len(suppl), i_type,
expected * unit_size))
def register(linter):
"""required method to auto register this checker """
linter.register_checker(FormatChecker(linter))
| {
"content_hash": "013c5a6a80d292d6b2b0ad8303a251ee",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 94,
"avg_line_length": 39.429429429429426,
"alnum_prop": 0.538004569687738,
"repo_name": "dbbhattacharya/kitsune",
"id": "751c88affd5961c066c2381316093f2fbed583e8",
"size": "13950",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/pylint/checkers/format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
import ffvideo
from PIL import ImageEnhance
import math
ORIGIN = (200, 180)
WIDTH = 300
FINAL_SIZE = (112, 80)
PADDED_CHAR_WIDTH = 16 # power of 2 to ensure sectors end at a line boundary
FRAME_DATA_BYTES = PADDED_CHAR_WIDTH * FINAL_SIZE[1]
FRAME_BYTES = 512 * int(math.ceil(FRAME_DATA_BYTES / 512.0))
PADDING_BYTES = FRAME_BYTES - FRAME_DATA_BYTES
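# Worked through with the constants above: 16 * 80 = 1280 data bytes per
# frame; rounding up to the next 512-byte sector boundary gives 3 * 512 = 1536
# bytes, leaving 1536 - 1280 = 256 padding bytes appended to each frame.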
ASPECT = float(FINAL_SIZE[0]) / FINAL_SIZE[1]
HEIGHT = int(WIDTH / ASPECT)
START_FRAME = 5
vs = ffvideo.VideoStream('clips/matt-ontheradio-centre.m4v')
frames = []
for i, frame in enumerate(vs):
img = frame.image()
x0, y0 = ORIGIN
img = img.crop((x0, y0, x0 + WIDTH, y0 + HEIGHT))
img = img.resize(FINAL_SIZE)
img = img.crop((0, 0, PADDED_CHAR_WIDTH * 8, FINAL_SIZE[1]))
bright = ImageEnhance.Brightness(img)
img = bright.enhance(3.0)
contrast = ImageEnhance.Contrast(img)
img = contrast.enhance(3.0)
img = img.convert('1')
# img.save("screens/matt-ontheradio2/%s.png" % i)
frames.append(img.tostring() + ("\0" * PADDING_BYTES))
f = open('build/ontheradio_sprites.bin', 'w')
f.write(''.join(frames[START_FRAME:START_FRAME+75]))
f.close
| {
"content_hash": "d21f1f45f236d4e775d4c4509a33244f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 27.024390243902438,
"alnum_prop": 0.6904332129963899,
"repo_name": "gasman/kisskill",
"id": "add9b6e15952deb1e9b81c8abe882dbba32f3493",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/matt_ontheradio_convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "122686"
},
{
"name": "C",
"bytes": "39079"
},
{
"name": "Perl",
"bytes": "352"
},
{
"name": "Python",
"bytes": "12918"
},
{
"name": "Ruby",
"bytes": "8669"
},
{
"name": "Visual Basic",
"bytes": "429"
}
],
"symlink_target": ""
} |
__author__ = 'Debanjan Mahata'
import time
from twython import Twython, TwythonError
from pymongo import MongoClient
import TwitterAuthentication as t_auth
def collect_tweets(path_name):
emotion_sentiment_mapping = {"joy":"positive","sadness":"negative","anger":"negative","fear":"negative","disgust":"negative"}
try:
#connecting to MongoDB database
mongoObj = MongoClient()
#setting the MongoDB database
db = mongoObj["TwitterSentimentAnalysis"]
#setting the collection in the database for storing the Tweets
collection = db["emotion_labeled_tweets"]
except:
print "Could not connect to the MongoDb Database, recheck the connection and the database"
try:
fp = open(path_name)
except IOError:
print "Please provide the right path to the file named labeledTweetSentimentCorpus.csv"
request_count = 0
key_count = 0
auth_key = t_auth.keyList[key_count%11]
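    # Rotate through the pre-configured Twitter credentials (assumed to be a
    # list of 11 credential dicts in TwitterAuthentication.keyList); after
    # roughly 1500 requests the loop below switches to the next key and sleeps
    # for a minute to stay under Twitter's per-key rate limits.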
for entry in fp:
tweet_id = entry.rstrip().split(":")[0]
try:
tweet_sentiment = emotion_sentiment_mapping[entry.rstrip().split("::")[1].strip()]
except:
tweet_sentiment = ""
twitter = Twython(auth_key["APP_KEY"],auth_key["APP_SECRET"],auth_key["OAUTH_TOKEN"],auth_key["OAUTH_TOKEN_SECRET"])
if request_count == 1499:
request_count = 0
key_count += 1
auth_key = t_auth.keyList[key_count%11]
time.sleep(60)
try:
twitter_status = twitter.show_status(id = tweet_id)
twitter_status["sentiment_label"] = tweet_sentiment
language = twitter_status["lang"]
if language == "en" and tweet_sentiment:
collection.insert(twitter_status)
else:
pass
except TwythonError:
pass
request_count += 1
if __name__ == "__main__":
#call method for collecting and storing the tweets in a MongoDb collection
collect_tweets("../CorpusAndLexicons/labeledTweetEmotionCorpus.txt")
| {
"content_hash": "2d72d740978b816059daabff8bece538",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 129,
"avg_line_length": 28.86111111111111,
"alnum_prop": 0.6198267564966313,
"repo_name": "dxmahata/TwitterSentimentAnalysis",
"id": "2323dc194cc66f49a8844cba143a1dcc53736e33",
"size": "2078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TwitterDataCollect/collectEmotionLabeledTweets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "163288"
},
{
"name": "Python",
"bytes": "1069666"
}
],
"symlink_target": ""
} |
import uuid
from decimal import Decimal
from typing import List
import pytest
from measurement.measures import Weight
from prices import Money
from ....app.models import App
from ....plugins.manager import get_plugins_manager
from ....plugins.webhook.plugin import WebhookPlugin
from ....shipping.interface import ShippingMethodData
from ....webhook.event_types import WebhookEventSyncType
from ....webhook.models import Webhook, WebhookEvent
@pytest.fixture
def webhook_plugin(settings):
def factory() -> WebhookPlugin:
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
manager = get_plugins_manager()
return manager.global_plugins[0]
return factory
@pytest.fixture()
def available_shipping_methods_factory():
def factory(num_methods=1) -> List[ShippingMethodData]:
methods = []
for i in range(num_methods):
methods.append(
ShippingMethodData(
id=str(i),
price=Money(Decimal("10"), "usd"),
name=uuid.uuid4().hex,
maximum_order_weight=Weight(kg=0),
minimum_order_weight=Weight(kg=0),
maximum_delivery_days=0,
minimum_delivery_days=5,
)
)
return methods
return factory
@pytest.fixture
def shipping_app_factory(db, permission_manage_orders, permission_manage_checkouts):
def create_app(app_name="Shipping App"):
app = App.objects.create(name=app_name, is_active=True)
app.tokens.create(name="Default")
app.permissions.add(permission_manage_orders)
app.permissions.add(permission_manage_checkouts)
webhook = Webhook.objects.create(
name="shipping-webhook-1",
app=app,
target_url="https://shipping-gateway.com/api/",
)
webhook.events.bulk_create(
[
WebhookEvent(
event_type=WebhookEventSyncType.CHECKOUT_FILTER_SHIPPING_METHODS,
webhook=webhook,
),
WebhookEvent(
event_type=WebhookEventSyncType.ORDER_FILTER_SHIPPING_METHODS,
webhook=webhook,
),
]
)
return app
return create_app
| {
"content_hash": "75e1c4c34a44caf840fadc350aaac00e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 85,
"avg_line_length": 31.30666666666667,
"alnum_prop": 0.6026405451448041,
"repo_name": "mociepka/saleor",
"id": "5200b1302d7f6615af1a4f18c7dcf83255adae6a",
"size": "2348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/plugins/webhook/tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""
.. _stock_market:
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the connections between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD
import datetime
import numpy as np
import pylab as pl
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
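# A minimal, self-contained sketch of the same pipeline on synthetic data
# (handy when the historical-quote download is unavailable). The variable
# names and array sizes below are illustrative only, not part of the example:
#
#   rng = np.random.RandomState(0)
#   X_demo = rng.randn(250, 10)              # 250 "days" x 10 "symbols"
#   X_demo /= X_demo.std(axis=0)
#   model_demo = covariance.GraphLassoCV().fit(X_demo)
#   _, labels_demo = cluster.affinity_propagation(model_demo.covariance_)
#   embedding_demo = manifold.LocallyLinearEmbedding(
#       n_components=2, eigen_solver='dense', n_neighbors=6
#   ).fit_transform(X_demo.T).T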
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 01, 01)
d2 = datetime.datetime(2008, 01, 01)
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'KFT': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'NWS': 'News Corp',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WAG': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(symbol_dict.items()).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
pl.figure(1, facecolor='w', figsize=(10, 8))
pl.clf()
ax = pl.axes([0., 0., 1., 1.])
pl.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
pl.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=pl.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=pl.cm.hot_r,
norm=pl.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
pl.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=pl.cm.spectral(label / float(n_labels)),
alpha=.6))
pl.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
pl.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
pl.show()
| {
"content_hash": "8a2c9a557f408bc6d0892d7aecb14b5e",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 31.8062015503876,
"alnum_prop": 0.6207652936875457,
"repo_name": "jmargeta/scikit-learn",
"id": "f2f44a2f85be76ab2e322580ddeaea98cd0c7651",
"size": "8206",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/applications/plot_stock_market.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Tutorial: Creating Visuals
==========================
02. Making physical measurements
--------------------------------
In the last tutorial we created a simple Visual subclass that draws a
rectangle. In this tutorial, we will make two additions:
1. Draw a rectangular border instead of a solid rectangle
2. Make the border a fixed pixel width, even when displayed inside a
user-zoomable ViewBox.
The border is made by drawing a line_strip with 10 vertices::
1--------------3
| |
| 2------4 | [ note that points 9 and 10 are
| | | | the same as points 1 and 2 ]
| 8------6 |
| |
7--------------5
In order to ensure that the border has a fixed width in pixels, we need to
adjust the spacing between the inner and outer rectangles whenever the user
changes the zoom of the ViewBox.
How? Recall that each
time the visual is drawn, it is given a TransformSystem instance that carries
information about the size of logical and physical pixels relative to the
visual [link to TransformSystem documentation]. Essentially, we have 4
coordinate systems:
Visual -> Document -> Framebuffer -> Render
The user specifies the position and size of the rectangle in Visual
coordinates, and in [tutorial 1] we used the vertex shader to convert directly
from Visual coordinates to render coordinates. In this tutorial we will
convert first to document coordinates, then make the adjustment for the border
width, then convert the remainder of the way to render coordinates.
Let's say, for example, that the user specifies the box width to be 20, and the
border width to be 5. To draw the border correctly, we cannot simply
add/subtract 5 from the inner rectangle coordinates; if the user zooms
in by a factor of 2 then the border would become 10 px wide.
Another way to say this is that a vector with length=1 in Visual coordinates
does not _necessarily_ have a length of 1 pixel on the canvas. Instead, we must
make use of the Document coordinate system, in which a vector of length=1
does correspond to 1 pixel.
There are a few ways we could make this measurement of pixel length. Here's
how we'll do it in this tutorial:
1. Begin with vertices for a rectangle with border width 0 (that is, vertex
1 is the same as vertex 2, 3=4, and so on).
2. In the vertex shader, first map the vertices to the document coordinate
system using the visual->document transform.
3. Add/subtract the line width from the mapped vertices.
4. Map the rest of the way to render coordinates with a second transform:
document->framebuffer->render.
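Roughly speaking, and continuing the example above with a requested border of
5 px: after step 2 the inner and outer vertices of the zero-width rectangle
map to the same document-space position, and step 3 then moves the inner
vertex by exactly 5 pixels, so the drawn border stays 5 px wide whether the
user is zoomed in by a factor of 2 or of 200.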
Note that this problem _cannot_ be solved using a simple scale factor! It is
necessary to use these transformations in order to draw correctly when there
is rotation or anisotropic scaling involved.
"""
from vispy import app, gloo, visuals, scene
import numpy as np
vertex_shader = """
void main() {
// First map the vertex to document coordinates
vec4 doc_pos = $visual_to_doc(vec4($position, 0, 1));
// Also need to map the adjustment direction vector, but this is tricky!
// We need to adjust separately for each component of the vector:
vec4 adjusted;
if ( $adjust_dir.x == 0. ) {
// If this is an outer vertex, no adjustment for line weight is needed.
// (In fact, trying to make the adjustment would result in no
// triangles being drawn, hence the if/else block)
adjusted = doc_pos;
}
else {
// Inner vertexes must be adjusted for line width, but this is
// surprisingly tricky given that the rectangle may have been scaled
// and rotated!
vec4 doc_x = $visual_to_doc(vec4($adjust_dir.x, 0, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
vec4 doc_y = $visual_to_doc(vec4(0, $adjust_dir.y, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
doc_x = normalize(doc_x);
doc_y = normalize(doc_y);
// Now doc_x + doc_y points in the direction we need in order to
// correct the line weight of _both_ segments, but the magnitude of
// that correction is wrong. To correct it we first need to
// measure the width that would result from using doc_x + doc_y:
vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x
float cur_width = length(doc_y - proj_y_x); // measure current weight
// And now we can adjust vertex position for line width:
adjusted = doc_pos + ($line_width / cur_width) * (doc_x + doc_y);
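        // (Why this scaling works: doc_x + doc_y used as-is would give a
        // border cur_width pixels wide, so rescaling it by
        // line_width / cur_width yields a border of exactly line_width
        // document pixels.)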
}
// Finally map the remainder of the way to render coordinates
gl_Position = $doc_to_render(adjusted);
}
"""
fragment_shader = """
void main() {
gl_FragColor = $color;
}
"""
class MyRectVisual(visuals.Visual):
"""Visual that draws a rectangular outline.
Parameters
----------
x : float
x coordinate of rectangle origin
y : float
y coordinate of rectangle origin
w : float
width of rectangle
h : float
height of rectangle
weight : float
width of border (in px)
"""
def __init__(self, x, y, w, h, weight=4.0):
visuals.Visual.__init__(self, vertex_shader, fragment_shader)
# 10 vertices for 8 triangles (using triangle_strip) forming a
# rectangular outline
self.vert_buffer = gloo.VertexBuffer(np.array([
[x, y],
[x, y],
[x+w, y],
[x+w, y],
[x+w, y+h],
[x+w, y+h],
[x, y+h],
[x, y+h],
[x, y],
[x, y],
], dtype=np.float32))
# Direction each vertex should move to correct for line width
# (the length of this vector will be corrected in the shader)
self.adj_buffer = gloo.VertexBuffer(np.array([
[0, 0],
[1, 1],
[0, 0],
[-1, 1],
[0, 0],
[-1, -1],
[0, 0],
[1, -1],
[0, 0],
[1, 1],
], dtype=np.float32))
self.shared_program.vert['position'] = self.vert_buffer
self.shared_program.vert['adjust_dir'] = self.adj_buffer
self.shared_program.vert['line_width'] = weight
self.shared_program.frag['color'] = (1, 0, 0, 1)
self.set_gl_state(cull_face=False)
self._draw_mode = 'triangle_strip'
def _prepare_transforms(self, view):
# Set the two transforms required by the vertex shader:
tr = view.transforms
view_vert = view.view_program.vert
view_vert['visual_to_doc'] = tr.get_transform('visual', 'document')
view_vert['doc_to_render'] = tr.get_transform('document', 'render')
# As in the previous tutorial, we auto-generate a Visual+Node class for use
# in the scenegraph.
MyRect = scene.visuals.create_visual_node(MyRectVisual)
# Finally we will test the visual by displaying in a scene.
canvas = scene.SceneCanvas(keys='interactive', show=True)
# This time we add a ViewBox to let the user zoom/pan
view = canvas.central_widget.add_view()
view.camera = 'panzoom'
view.camera.rect = (0, 0, 800, 800)
# ..and add the rects to the view instead of canvas.scene
rects = [MyRect(100, 100, 200, 300, parent=view.scene),
MyRect(500, 100, 200, 300, parent=view.scene)]
# Again, rotate one rectangle to ensure the transforms are working as we
# expect.
tr = visuals.transforms.MatrixTransform()
tr.rotate(25, (0, 0, 1))
rects[1].transform = tr
# Add some text instructions
text = scene.visuals.Text("Drag right mouse button to zoom.", color='w',
anchor_x='left', parent=view, pos=(20, 30))
# ..and optionally start the event loop
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1:
app.run()
| {
"content_hash": "1ff4d9791ae8c62df7b4bcc9b34bab2f",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 36.481651376146786,
"alnum_prop": 0.6244184584433548,
"repo_name": "Eric89GXL/vispy",
"id": "b6b83dd97452466ed461d0cf7453d3489e90dc87",
"size": "8272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tutorial/visuals/T02_measurements.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
} |
"""
Importing concepts found at:
GitHub Developer Support
https://github.com/Esri/developer-support/tree/master/python/general-python/update-webmap-json
https://developers.arcgis.com/rest/analysis/api-reference/programmatically-accessing-analysis-services.htm
https://developers.arcgis.com/rest/analysis/api-reference/create-drivetime.htm
"""
import urllib
import urllib2
import json
import httplib
import time
import contextlib
import string
import smtplib
class ArcGISOnline(object):
def __init__(self, Username, Password):
self.username = Username
self.password = Password
self.__token = self.generateToken(self.username, self.password)['token']
self.__protocol = self.__useProtocol()
self.__orgInfo = self.__GetInfo()
self.__short = self.__orgInfo['urlKey']
self.__analysis_url = self.__orgInfo['helperServices']['analysis']['url']
def submit_request(self, request):
""" Returns the response from an HTTP request in json format."""
with contextlib.closing(urllib2.urlopen(request)) as response:
job_info = json.load(response)
return job_info
@staticmethod
def generateToken(username, password):
'''Generate a token using urllib modules for the input
username and password'''
url = "https://arcgis.com/sharing/generateToken"
data = {'username': username,
'password': password,
'referer' : 'https://arcgis.com',
'expires' : 1209600,
'f': 'json'}
data = urllib.urlencode(data)
request = urllib2.Request(url, data)
response = urllib2.urlopen(request)
return json.loads(response.read())
@property
def token(self):
'''Makes the non-public token read-only as a public token property'''
return self.__token
@property
def AnalysisURL(self):
        '''Makes the non-public analysis URL read-only as a public property'''
return self.__analysis_url
def __useProtocol(self):
tokenResponse = self.generateToken(self.username, self.password)
if tokenResponse['ssl']:
ssl = 'https'
else:
ssl = 'http'
return ssl
def __GetInfo(self):
'''Get information about the specified organization
this information includes the Short name of the organization (['urlKey'])
as well as the organization ID ['id']'''
URL= '{}://arcgis.com/sharing/rest/portals/self?f=json&token={}'.format(self.__protocol,self.__token)
request = urllib2.Request(URL)
response = urllib2.urlopen(request)
return json.loads(response.read())
def analysis_job(self, analysis_url, task, params):
""" Submits an Analysis job and returns the job URL for monitoring the job
status in addition to the json response data for the submitted job."""
# Unpack the Analysis job parameters as a dictionary and add token and
# formatting parameters to the dictionary. The dictionary is used in the
# HTTP POST request. Headers are also added as a dictionary to be included
# with the POST.
#
print("Submitting analysis job...")
params["f"] = "json"
params["token"] = self.__token
headers = {"Referer":"http://www.arcgis.com"}
task_url = "{}/{}".format(analysis_url, task)
submit_url = "{}/submitJob?".format(task_url)
request = urllib2.Request(submit_url, urllib.urlencode(params), headers)
analysis_response = self.submit_request(request)
if analysis_response:
# Print the response from submitting the Analysis job.
#
print(analysis_response)
return task_url, analysis_response
else:
raise Exception("Unable to submit analysis job.")
def analysis_job_status(self, task_url, job_info):
""" Tracks the status of the submitted Analysis job."""
if "jobId" in job_info:
# Get the id of the Analysis job to track the status.
#
job_id = job_info.get("jobId")
job_url = "{}/jobs/{}?f=json&token={}".format(task_url, job_id, self.__token)
request = urllib2.Request(job_url)
job_response = self.submit_request(request)
# Query and report the Analysis job status.
#
if "jobStatus" in job_response:
while not job_response.get("jobStatus") == "esriJobSucceeded":
time.sleep(5)
request = urllib2.Request(job_url)
job_response = self.submit_request(request)
print(job_response)
if job_response.get("jobStatus") == "esriJobFailed":
raise Exception("Job failed.")
elif job_response.get("jobStatus") == "esriJobCancelled":
raise Exception("Job cancelled.")
elif job_response.get("jobStatus") == "esriJobTimedOut":
raise Exception("Job timed out.")
if "results" in job_response:
return job_response
else:
raise Exception("No job results.")
else:
raise Exception("No job url.")
def analysis_job_results(self, task_url, job_info):
""" Use the job result json to get information about the feature service
created from the Analysis job."""
# Get the paramUrl to get information about the Analysis job results.
#
if "jobId" in job_info:
job_id = job_info.get("jobId")
if "results" in job_info:
results = job_info.get("results")
result_values = {}
for key in results.keys():
param_value = results[key]
if "paramUrl" in param_value:
param_url = param_value.get("paramUrl")
result_url = "{}/jobs/{}/{}?token={}&f=json".format(task_url,
job_id,
param_url,
self.__token)
request = urllib2.Request(result_url)
param_result = self.submit_request(request)
job_value = param_result.get("value")
result_values[key] = job_value
return result_values
else:
raise Exception("Unable to get analysis job results.")
else:
raise Exception("Unable to get analysis job results.")
def GetTravelModes(self, FORMOFTRAVEL):
url = "http://logistics.arcgis.com/arcgis/rest/services/World/Utilities/GPServer/GetTravelModes/execute?token={0}&f=pjson".format(self.__token)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
responseJ = json.loads(response.read())
for mode in responseJ['results'][0]['value']['features']:
if mode['attributes']['Name'] == FORMOFTRAVEL:
return mode['attributes']['TravelMode']
def CreateDriveTimes(self, featureLayerURL, WHERE_CLAUSE, breakValues, breakUnits, overlapPolicy, OUTPUTNAME):
data = {}
data['inputLayer'] = {'url' : featureLayerURL,
'filter' : WHERE_CLAUSE
}
data['travelMode'] = self.GetTravelModes("Driving Time")
data['breakValues'] = breakValues
data['breakUnits'] = breakUnits
data['overlapPolicy'] = overlapPolicy
data['outputName'] = {"serviceProperties": {"name": OUTPUTNAME}}
task_url, job_info = self.analysis_job(self.__analysis_url, "CreateDriveTimeAreas", data)
job_info = self.analysis_job_status(task_url, job_info)
job_values = self.analysis_job_results(task_url, job_info)
return job_values
if __name__ == '__main__':
username = "thisIsAUserName"
password = "MyPassword!"
onlineAccount = ArcGISOnline(username, password)
jobResults = onlineAccount.CreateDriveTimes("URLTOFEATURESERVICE", "OBJECTID = 4", [5.0, 10.0, 15.0], "Minutes", "Split", "ThisIsAnOutput")
print "DONE"
| {
"content_hash": "21fab9c358a66fc84702a15336788b72",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 151,
"avg_line_length": 41.9452736318408,
"alnum_prop": 0.5789348831692563,
"repo_name": "AkshayHarshe/developer-support",
"id": "46fe5edfc33fcea692d8b43f334d0cbdd4f8aab5",
"size": "8431",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "python/general-python/create-drive-times/create-drive-times.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3082"
},
{
"name": "C#",
"bytes": "177507"
},
{
"name": "C++",
"bytes": "40325"
},
{
"name": "CSS",
"bytes": "9188"
},
{
"name": "HTML",
"bytes": "347267"
},
{
"name": "Java",
"bytes": "94389"
},
{
"name": "JavaScript",
"bytes": "447091"
},
{
"name": "Objective-C",
"bytes": "38468"
},
{
"name": "PHP",
"bytes": "1194"
},
{
"name": "PLSQL",
"bytes": "25574"
},
{
"name": "Python",
"bytes": "118780"
},
{
"name": "QML",
"bytes": "46764"
},
{
"name": "R",
"bytes": "4328"
},
{
"name": "SQLPL",
"bytes": "6564"
},
{
"name": "Visual Basic",
"bytes": "1639"
}
],
"symlink_target": ""
} |
import logging
import re
from webkitpy.common.checkout.diff_parser import DiffParser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.checkout.scm.detection import SCMDetector
_log = logging.getLogger(__name__)
class PatchReader(object):
"""Supports checking style in patches."""
def __init__(self, text_file_reader):
"""Create a PatchReader instance.
Args:
text_file_reader: A TextFileReader instance.
"""
self._text_file_reader = text_file_reader
def check(self, patch_string, fs=None):
"""Check style in the given patch."""
fs = fs or FileSystem()
patch_files = DiffParser(patch_string.splitlines()).files
        # If the user uses git, checking the Subversion config file only once is enough.
call_only_once = True
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s', len(line_numbers), path)
if not line_numbers:
                match = re.search(r"\.png$", path)
if match and fs.exists(path):
if call_only_once:
self._text_file_reader.process_file(file_path=path, line_numbers=None)
cwd = FileSystem().getcwd()
detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
if detection.display_name() == "git":
call_only_once = False
continue
# Don't check files which contain only deleted lines
# as they can never add style errors. However, mark them as
# processed so that we count up number of such files.
self._text_file_reader.count_delete_only_file()
continue
self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
| {
"content_hash": "aa37b8baece5d7cc1fd4354ff6ba2ad5",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 94,
"avg_line_length": 39.51923076923077,
"alnum_prop": 0.6009732360097324,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "493b13cf02b97873d3db1d4dd8a37275669e96a6",
"size": "3696",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/style/patchreader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""This code example runs a report that with custom fields found in the line
items of an order.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
from googleads import errors
# Set the ID of the order to get line items from.
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201403')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201403')
# Filter for line items of a given order.
values = [{
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
query = 'WHERE orderId = :orderId'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Collect all line item custom field IDs for an order.
custom_field_ids = set()
  # Get line items by statement.
while True:
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Get custom field IDs from the line items of an order.
for line_item in response['results']:
if 'customFieldValues' in line_item:
for custom_field_value in line_item['customFieldValues']:
custom_field_ids.add(custom_field_value['customFieldId'])
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
# Create statement object to filter for an order.
filter_statement = {'query': query, 'values': values}
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['LINE_ITEM_ID', 'LINE_ITEM_NAME'],
'statement': filter_statement,
'columns': ['AD_SERVER_IMPRESSIONS'],
'dateRangeType': 'LAST_MONTH',
'customFieldIds': list(custom_field_ids)
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.DfpReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id \'%s\' downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ORDER_ID)
| {
"content_hash": "b1659baa4db102b43eb868017a72f93b",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 29.76923076923077,
"alnum_prop": 0.6640826873385013,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "250975b7809dd1cc7f429937c1d6d3d55d8a6138",
"size": "3327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201403/report_service/run_report_with_custom_fields.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
import pytest
from qtpy import PYQT5, PYQT6, PYSIDE2
@pytest.mark.skipif(PYQT5 or PYQT6 or PYSIDE2,
reason="Not available by default in PyQt. Not available for PySide2")
def test_qtnetworkauth():
"""Test the qtpy.QtNetworkAuth namespace"""
from qtpy import QtNetworkAuth
assert QtNetworkAuth.QAbstractOAuth is not None
assert QtNetworkAuth.QAbstractOAuth2 is not None
assert QtNetworkAuth.QAbstractOAuthReplyHandler is not None
assert QtNetworkAuth.QOAuth1 is not None
assert QtNetworkAuth.QOAuth1Signature is not None
assert QtNetworkAuth.QOAuth2AuthorizationCodeFlow is not None
| {
"content_hash": "3b7bb2ad5c7af5353561032ddd01cfb4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 89,
"avg_line_length": 42.333333333333336,
"alnum_prop": 0.7700787401574803,
"repo_name": "davvid/qtpy",
"id": "e41a5db1ae884ea66a2e0f9bc2e92af950989dff",
"size": "635",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qtpy/tests/test_qtnetworkauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69789"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import argparse
from typing import List
import torch.jit
from parlai.utils.io import PathManager
def test_exported_model(scripted_model_file: str, inputs: List[str]):
with PathManager.open(scripted_model_file, "rb") as f:
scripted_module = torch.jit.load(f)
print('\nGenerating given the scripted module:')
context = []
for input_ in inputs:
print(' TEXT: ' + input_)
context.append(input_)
label = scripted_module('\n'.join(context))
print("LABEL: " + label)
context.append(label)
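# Example invocation (the checkpoint name and input text below are only
# placeholders, not files shipped with this script):
#   python test_exported_model.py --scripted-model-file model_scripted.pt \
#       --input "hello world|how are you?"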
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-smf',
'--scripted-model-file',
type=str,
help='Where to load the scripted model checkpoint from',
)
parser.add_argument(
"-i",
"--input",
type=str,
default="hello world",
help="Test input string to pass into the encoder of the scripted model. Separate lines with a pipe",
)
args = parser.parse_args()
test_exported_model(
scripted_model_file=args.scripted_model_file, inputs=args.input.split('|')
)
| {
"content_hash": "878eb3886856fbd1c3b97ac1c50b6c01",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 108,
"avg_line_length": 27.38095238095238,
"alnum_prop": 0.6147826086956522,
"repo_name": "facebookresearch/ParlAI",
"id": "8f19d1286eec1c8bfb4d96aa7616172187a791af",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/torchscript/scripts/test_exported_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
import csv, json, os, sys
# pass the filename as an argument when calling this script
if len(sys.argv) < 2:
sys.exit('Usage: csv-to-json.py /path/to/file.csv')
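# A second argument may name the output file; otherwise the input file's base
# name is reused with a ".json" extension (handled below). For example, with
# hypothetical paths: python csv-to-json.py data/products.csv products.json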
fileIn = sys.argv[1]
fileOnly = os.path.basename(fileIn)
try:
fileOut = sys.argv[2]
except IndexError:
fileList = [fileOnly.split('.')[0], 'json']
fileOut = ".".join(fileList)
data = csv.reader(open(fileIn, 'rU'), delimiter=',')
# get header row
fieldnames = data.next()
# get number of columns
fieldnames_len = len(fieldnames)
jsonList = []
i = 0
for row in data:
# add an empty dict to the list
jsonList.append({})
for j in range(0, len(row)):
jsonList[i][fieldnames[j]] = row[j]
    # pad rows that are shorter than the header with empty strings
for j in range(len(row), fieldnames_len):
jsonList[i][fieldnames[j]] = ""
i = i + 1
with open(fileOut, 'w') as outfile:
json.dump(jsonList, outfile, sort_keys=True, indent=4, ensure_ascii=False)
sys.exit()
| {
"content_hash": "6e33770bc127d5002ec6fb4acc6ef750",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 22.6,
"alnum_prop": 0.6758849557522124,
"repo_name": "lenwood/csv-json",
"id": "7be0d186d5a60e6d327593233ecf9ddba4545c7c",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/csv-to-json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1016"
},
{
"name": "Python",
"bytes": "4637"
}
],
"symlink_target": ""
} |
import logging
from tornado import escape
from odin.adapters.adapter import ApiAdapterResponse, \
request_types, response_types
from odin_data.odin_data_adapter import OdinDataAdapter
class MetaListenerAdapter(OdinDataAdapter):
"""An OdinControl adapter for a MetaListener"""
def __init__(self, **kwargs):
logging.debug("MetaListenerAdapter init called")
# These are internal adapter parameters
self.acquisition_id = None
self.acquisition_active = False
self.acquisitions = []
# These parameters are stored under an acquisition tree, so we need to
# parse out the parameters for the acquisition we have stored
self._status_parameters = {}
self._set_defaults()
# These config parameters are buffered so they can be included whenever a new acquisition
# is created. This helps to abstract the idea of acquisitions being created and removed and
# means the client does not need to send things in a certain order.
self._config_parameters = {
"config/directory": "",
"config/file_prefix": "",
"config/flush_frame_frequency": 100,
"config/flush_timeout": 1,
}
# Parameters must be created before base init called
super(MetaListenerAdapter, self).__init__(**kwargs)
self._client = self._clients[0] # We only have one client
def _set_defaults(self):
self._status_parameters = {
"status/full_file_path": "",
"status/num_processors": 0,
"status/writing": False,
"status/written": 0
}
def _map_acquisition_parameter(self, path):
"""Map acquisition parameter path string to full uri item list"""
# Replace the first slash with acquisitions/<acquisition_id>/
# E.g. status/filename -> status/acquisitions/<acquisition_id>/filename
full_path = path.replace(
"/", "/acquisitions/{}/".format(self.acquisition_id),
1 # First slash only
)
return full_path.split("/") # Return list of uri items
@request_types('application/json')
@response_types('application/json', default='application/json')
def get(self, path, request):
"""Implementation of the HTTP GET verb for MetaListenerAdapter
:param path: URI path of the GET request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
status_code = 200
response = {}
logging.debug("GET path: %s", path)
logging.debug("GET request: %s", request)
if path == "config/acquisition_id":
response["value"] = self.acquisition_id
elif path == "status/acquisition_active":
response["value"] = self.acquisition_active
elif path == "config/acquisitions":
acquisition_tree = self.traverse_parameters(
self._clients[0].parameters,
["config", "acquisitions"]
)
if acquisition_tree is not None:
response["value"] = "," .join(acquisition_tree.keys())
else:
response["value"] = None
elif path in self._status_parameters:
response["value"] = self._status_parameters[path]
elif path in self._config_parameters:
response["value"] = self._config_parameters[path]
else:
return super(MetaListenerAdapter, self).get(path, request)
return ApiAdapterResponse(response, status_code=status_code)
@request_types('application/json')
@response_types('application/json', default='application/json')
def put(self, path, request):
"""
Implementation of the HTTP PUT verb for MetaListenerAdapter
:param path: URI path of the PUT request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
logging.debug("PUT path: %s", path)
logging.debug("PUT request: %s", request)
logging.debug("PUT request.body: %s",
str(escape.url_unescape(request.body)))
value = str(escape.url_unescape(request.body)).replace('"', '')
if path == "config/acquisition_id":
self.acquisition_id = value
# Set inactive so process_updates doesn't clear acquisition ID
self.acquisition_active = False
# Send entire config with new acquisition ID
config = dict(acquisition_id=self.acquisition_id)
for key, value in self._config_parameters.items():
# Add config parameters without config/ prefix
config[key.split("config/")[-1]] = value
status_code, response = self._send_config(config)
elif path == "config/stop":
self.acquisition_active = False
# By default we stop all acquisitions by passing None
config = {
"acquisition_id": None,
"stop": True
}
if self.acquisition_id is not None:
# If we have an Acquisition ID then stop that one only
config["acquisition_id"] = self.acquisition_id
status_code, response = self._send_config(config)
self.acquisition_id = None
elif path in self._config_parameters:
# Store config to re-send with acquisition ID when it is changed
self._config_parameters[path] = value
parameter = path.split("/", 1)[-1] # Remove 'config/'
config = {
"acquisition_id": self.acquisition_id,
parameter: value
}
status_code, response = self._send_config(config)
else:
return super(MetaListenerAdapter, self).put(path, request)
return ApiAdapterResponse(response, status_code=status_code)
def _send_config(self, config_message):
status_code = 200
response = {}
try:
self._client.send_configuration(config_message)
except Exception as err:
logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
logging.error("Error: %s", err)
status_code = 503
response = {"error": OdinDataAdapter.ERROR_FAILED_TO_SEND}
return status_code, response
def process_updates(self):
"""Handle additional background update loop tasks
Store a copy of all parameters so they don't disappear
"""
if self.acquisition_id is not None:
acquisitions = self.traverse_parameters(
self._client.parameters, ["status", "acquisitions"]
)
acquisition_active = (
acquisitions is not None and self.acquisition_id in acquisitions
)
if acquisition_active:
self.acquisition_active = True
for parameter in self._status_parameters:
value = self.traverse_parameters(
self._client.parameters,
self._map_acquisition_parameter(parameter)
)
self._status_parameters[parameter] = value
else:
self.acquisition_active = False
self._set_defaults()
else:
self._set_defaults()
| {
"content_hash": "26cdd5abae5d59930cd64350ffef724c",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 99,
"avg_line_length": 39.27894736842105,
"alnum_prop": 0.5921211309125016,
"repo_name": "odin-detector/odin-data",
"id": "449f0398b6339325af518613a7c882d4f4e84fb4",
"size": "7463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/python/odin_data/meta_listener_adapter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26513"
},
{
"name": "C++",
"bytes": "1382369"
},
{
"name": "CMake",
"bytes": "41649"
},
{
"name": "CSS",
"bytes": "1144"
},
{
"name": "HTML",
"bytes": "7416"
},
{
"name": "Java",
"bytes": "29337"
},
{
"name": "JavaScript",
"bytes": "9693"
},
{
"name": "Python",
"bytes": "352270"
},
{
"name": "Shell",
"bytes": "5240"
}
],
"symlink_target": ""
} |
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('stock', '0013_auto_20190908_0916'),
]
operations = [
migrations.AlterField(
model_name='stocklocation',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='children', to='stock.StockLocation'),
),
]
| {
"content_hash": "4db5e9d6ea5583c7e7df6284d062dbb7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 167,
"avg_line_length": 27.944444444444443,
"alnum_prop": 0.6520874751491054,
"repo_name": "SchrodingersGat/InvenTree",
"id": "b6f7411e10903fd690f3f04969797cf63e591709",
"size": "552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InvenTree/stock/migrations/0014_auto_20190908_0918.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246488"
},
{
"name": "Dockerfile",
"bytes": "7169"
},
{
"name": "HTML",
"bytes": "584199"
},
{
"name": "JavaScript",
"bytes": "1968805"
},
{
"name": "Procfile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2600145"
},
{
"name": "Shell",
"bytes": "33927"
}
],
"symlink_target": ""
} |
import os
from azure.core.credentials import AccessToken, AzureKeyCredential
from azure.maps.geolocation import MapsGeolocationClient
from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy
from geolocation_preparer import MapsGeolocationPreparer
class TestMapsGeolocationClient(AzureRecordedTestCase):
def setup_method(self, method):
self.client = MapsGeolocationClient(
credential=AzureKeyCredential(os.environ.get('AZURE_SUBSCRIPTION_KEY', "AzureMapsSubscriptionKey"))
)
assert self.client is not None
@MapsGeolocationPreparer()
@recorded_by_proxy
def test_get_country_code(self):
result = self.client.get_country_code(ip_address="2001:4898:80e8:b::189")
assert result is not None
        assert result.iso_code == 'US'
| {
"content_hash": "cae57bf6bda445c5f3b0e4fcfd594786",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 111,
"avg_line_length": 42.526315789473685,
"alnum_prop": 0.7524752475247525,
"repo_name": "Azure/azure-sdk-for-python",
"id": "303c940360551ecb81d8dc11012fabdf31ec9717",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/maps/azure-maps-geolocation/tests/test_geolocation_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import cStringIO
import logging
import os
import sys
import textwrap
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import dmprof
from dmprof import FUNCTION_ADDRESS, TYPEINFO_ADDRESS
class SymbolMappingCacheTest(unittest.TestCase):
class MockBucketSet(object):
def __init__(self, addresses):
self._addresses = addresses
def iter_addresses(self, address_type): # pylint: disable=W0613
for address in self._addresses:
yield address
class MockSymbolFinder(object):
def __init__(self, mapping):
self._mapping = mapping
def find(self, address_list):
return [self._mapping[address] for address in address_list]
_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
""")
_EXPECTED_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
2 0x0000000000000002
7fc33ef7bc3e std::map::operator[]
7fc34411f9d5 WTF::RefCounted::operator new
""")
_TEST_FUNCTION_ADDRESS_LIST1 = [
0x1, 0x7fc33eebcaa4, 0x7fc33ef69242]
_TEST_FUNCTION_ADDRESS_LIST2 = [
0x1, 0x2, 0x7fc33eebcaa4, 0x7fc33ef69242, 0x7fc33ef7bc3e, 0x7fc34411f9d5]
_TEST_FUNCTION_DICT = {
0x1: '0x0000000000000001',
0x2: '0x0000000000000002',
0x7fc33eebcaa4: '__gnu_cxx::new_allocator::allocate',
0x7fc33ef69242: 'void DispatchToMethod',
0x7fc33ef7bc3e: 'std::map::operator[]',
0x7fc34411f9d5: 'WTF::RefCounted::operator new',
}
def test_update(self):
symbol_mapping_cache = dmprof.SymbolMappingCache()
cache_f = cStringIO.StringIO()
cache_f.write(self._TEST_FUNCTION_CACHE)
# No update from self._TEST_FUNCTION_CACHE
symbol_mapping_cache.update(
FUNCTION_ADDRESS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST1),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST1:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_ADDRESS, address))
self.assertEqual(self._TEST_FUNCTION_CACHE, cache_f.getvalue())
# Update to self._TEST_FUNCTION_ADDRESS_LIST2
symbol_mapping_cache.update(
FUNCTION_ADDRESS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST2),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST2:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_ADDRESS, address))
self.assertEqual(self._EXPECTED_TEST_FUNCTION_CACHE, cache_f.getvalue())
class PolicyTest(unittest.TestCase):
class MockSymbolMappingCache(object):
def __init__(self):
self._symbol_caches = {FUNCTION_ADDRESS: {}, TYPEINFO_ADDRESS: {}}
def add(self, address_type, address, symbol):
self._symbol_caches[address_type][address] = symbol
def lookup(self, address_type, address):
symbol = self._symbol_caches[address_type].get(address)
return symbol if symbol else '0x%016x' % address
_TEST_POLICY = textwrap.dedent("""\
{
"components": [
"second",
"mmap-v8",
"malloc-v8",
"malloc-WebKit",
"mmap-catch-all",
"malloc-catch-all"
],
"rules": [
{
"name": "second",
"stacktrace": "optional",
"allocator": "optional"
},
{
"name": "mmap-v8",
"stacktrace": ".*v8::.*",
"allocator": "mmap"
},
{
"name": "malloc-v8",
"stacktrace": ".*v8::.*",
"allocator": "malloc"
},
{
"name": "malloc-WebKit",
"stacktrace": ".*WebKit::.*",
"allocator": "malloc"
},
{
"name": "mmap-catch-all",
"stacktrace": ".*",
"allocator": "mmap"
},
{
"name": "malloc-catch-all",
"stacktrace": ".*",
"allocator": "malloc"
}
],
"version": "POLICY_DEEP_3"
}
""")
def test_load(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
self.assertEqual('POLICY_DEEP_3', policy.version)
def test_find(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
symbol_mapping_cache = self.MockSymbolMappingCache()
symbol_mapping_cache.add(FUNCTION_ADDRESS, 0x1212, 'v8::create')
symbol_mapping_cache.add(FUNCTION_ADDRESS, 0x1381, 'WebKit::create')
bucket1 = dmprof.Bucket([0x1212, 0x013], False, 0x29492, '_Z')
bucket1.symbolize(symbol_mapping_cache)
bucket2 = dmprof.Bucket([0x18242, 0x1381], False, 0x9492, '_Z')
bucket2.symbolize(symbol_mapping_cache)
bucket3 = dmprof.Bucket([0x18242, 0x181], False, 0x949, '_Z')
bucket3.symbolize(symbol_mapping_cache)
self.assertEqual('malloc-v8', policy.find(bucket1))
self.assertEqual('malloc-WebKit', policy.find(bucket2))
self.assertEqual('malloc-catch-all', policy.find(bucket3))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
| {
"content_hash": "bd2948fc459886a0cb0befc1da7ffa04",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 32.55747126436781,
"alnum_prop": 0.6241835834068844,
"repo_name": "nacl-webkit/chrome_deps",
"id": "23eaaa75c1a0fdbebeee5357a8c12387ca7e7a99",
"size": "5854",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/deep_memory_profiler/tests/dmprof_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1173441"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "74568368"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "156174457"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3088381"
},
{
"name": "JavaScript",
"bytes": "18179048"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "3044"
},
{
"name": "Objective-C",
"bytes": "6965520"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932725"
},
{
"name": "Python",
"bytes": "8458718"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1526176"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XSLT",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import os
import sys
import mock
import pytest
from praw.config import Config
from praw.exceptions import ClientException
class TestConfig(object):
@staticmethod
def _assert_config_read(environment, mock_config):
mock_instance = mock_config.return_value
Config.CONFIG = None # Force config file reload
prev_environment = {environment: None}
for env_name in ["APPDATA", "HOME", "XDG_CONFIG_HOME"]:
if env_name in os.environ:
prev_environment[env_name] = os.environ[env_name]
del os.environ[env_name]
os.environ[environment] = "/MOCK"
module_dir = os.path.dirname(sys.modules["praw"].__file__)
environ_path = os.path.join(
"/MOCK", ".config" if environment == "HOME" else "", "praw.ini"
)
locations = [
os.path.join(module_dir, "praw.ini"),
environ_path,
"praw.ini",
]
try:
Config._load_config()
mock_instance.read.assert_called_with(locations)
finally:
Config.CONFIG = None # Force config file reload
for env_name in prev_environment:
if prev_environment[env_name] is None:
del os.environ[env_name]
else:
os.environ[env_name] = prev_environment[env_name]
def test_check_for_updates__false(self):
for value in [False, "False", "other"]:
config = Config("DEFAULT", check_for_updates=value)
assert config.check_for_updates is False
def test_custom__extra_values_set(self):
config = Config("DEFAULT", user1="foo", user2="bar")
assert config.custom == {"user1": "foo", "user2": "bar"}
def test_custom__no_extra_values_set(self):
config = Config("DEFAULT")
assert config.custom == {}
def test_check_for_updates__true(self):
for value in [True, "1", "true", "YES", "on"]:
config = Config("DEFAULT", check_for_updates=value)
assert config.check_for_updates is True
@mock.patch("configparser.RawConfigParser")
def test_load_ini_from_appdata(self, mock_config):
self._assert_config_read("APPDATA", mock_config)
@mock.patch("configparser.RawConfigParser")
def test_load_ini_from_home(self, mock_config):
self._assert_config_read("HOME", mock_config)
@mock.patch("configparser.RawConfigParser")
def test_load_ini_from_xdg_config_home(self, mock_config):
self._assert_config_read("XDG_CONFIG_HOME", mock_config)
@mock.patch("configparser.RawConfigParser")
def test_load_ini_with_no_config_directory(self, mock_config):
mock_instance = mock_config.return_value
Config.CONFIG = None # Force config file reload
prev_environment = {}
for key in ["APPDATA", "HOME", "XDG_CONFIG_HOME"]:
if key in os.environ:
prev_environment[key] = os.environ[key]
del os.environ[key]
module_dir = os.path.dirname(sys.modules["praw"].__file__)
locations = [os.path.join(module_dir, "praw.ini"), "praw.ini"]
try:
Config._load_config()
mock_instance.read.assert_called_with(locations)
finally:
Config.CONFIG = None # Force config file reload
for key, value in prev_environment.items():
os.environ[key] = value
def test_short_url(self):
config = Config("DEFAULT")
assert config.short_url == "https://redd.it"
def test_short_url_not_defined(self):
config = Config("DEFAULT", short_url=None)
with pytest.raises(ClientException) as excinfo:
config.short_url
assert str(excinfo.value) == "No short domain specified."
def test_unset_value_has_useful_string_representation(self):
config = Config("DEFAULT", password=Config.CONFIG_NOT_SET)
assert str(config.password) == "NotSet"
| {
"content_hash": "c2a6a506df3fc73ef389342a009c6a8f",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 75,
"avg_line_length": 37.3177570093458,
"alnum_prop": 0.6020535937891309,
"repo_name": "leviroth/praw",
"id": "37b2e730d03d455b8edc61848a5940f14e88b93b",
"size": "3993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_config.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "513471"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
from pyramid.config import Configurator
from pyramid.authorization import ACLAuthorizationPolicy
from .models.sqltraversal import root_factory
from .security import groupfinder
def main(global_config, **settings):
config = Configurator(settings=settings,
root_factory=root_factory)
config.include('pyramid_tm')
config.include('pyramid_sqlalchemy')
config.include('pyramid_jinja2')
# Wire up security policy
from moonrock.security import JWTAuthenticationPolicy
config.set_authentication_policy(
JWTAuthenticationPolicy(settings['TOKEN_SECRET'],
callback=groupfinder)
)
config.set_authorization_policy(ACLAuthorizationPolicy())
config.include('.auth')
config.include('.subscribers')
config.include('.views')
config.add_static_view(name='/', path='moonrock:../dist')
return config.make_wsgi_app()
| {
"content_hash": "beb043cadc5f183c524269d2f96adf1c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.6983783783783784,
"repo_name": "pauleveritt/moonshot",
"id": "e5ce16e306b73cce39c03281ac77857ae3501352",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moonrock/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33729"
},
{
"name": "JavaScript",
"bytes": "79531"
},
{
"name": "Python",
"bytes": "18274"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from bs4 import BeautifulSoup
import requests
from .emoji import Emoji
EMOJI_CATEGORIES = ['people', 'nature', 'food-drink', 'activity',
'travel-places', 'objects', 'symbols', 'flags']
'''List of all valid emoji categories
'''
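# Illustrative usage of the class defined below (the emoji slug is only an
# example, and every call performs a live request to emojipedia.org):
#   Emojipedia.search('grinning-face')
#   Emojipedia.category('people')
#   Emojipedia.random()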
class Emojipedia:
@staticmethod
def search(query):
'''Searches for emojis on Emojipedia. Query must be a valid emoji name.
:param str query: the search query
:returns: Emoji with the given name
:rtype: Emoji
'''
return Emoji(Emojipedia._get_emoji_page(query))
@staticmethod
def random():
'''Returns a random emoji.
:returns: A random emoji
:rtype: Emoji
'''
return Emoji(Emojipedia._get_emoji_page('random'))
@staticmethod
def category(query):
'''Returns list of all emojis in the given category.
:returns: List of emojies in the category
:rtype: [Emoji]
'''
if query not in EMOJI_CATEGORIES:
raise ValueError('{} is not a valid emoji category.'.format(query))
soup = Emojipedia._get_page(query)
emoji_list = soup.find('ul', {'class': 'emoji-list'})
if not emoji_list:
raise ValueError('Could not extract emoji list')
emojis = []
for emoji_entry in emoji_list.find_all('li'):
url = emoji_entry.find('a')['href']
emoji_text = emoji_entry.text.split(' ')
title = ' '.join(emoji_text[1:])
char = emoji_text[0]
e = Emoji(url=url)
e._title = title
e._character = char
emojis.append(e)
return emojis
@staticmethod
def all():
'''Returns list of emojis in Emojipedia.
An extremely powerful method.
Returns all emojis known to human-kind. 😎
:returns: List of all emojies
:rtype: [Emoji]
'''
soup = Emojipedia._get_page('emoji')
emoji_list = soup.find('table', {'class': 'emoji-list'})
if not emoji_list:
raise ValueError('Could not extract emoji list')
emojis = []
for emoji_entry in emoji_list.find_all('tr'):
emoji_link = emoji_entry.find('a')
emoji_text = emoji_link.text.split(' ')
emoji_row, codepoints = emoji_entry.find_all('td')
e = Emoji(url=emoji_link['href'])
e._codepoints = codepoints.text.split(', ')
e._character, e._title = emoji_text[0], ' '.join(emoji_text[1:])
emojis.append(e)
return emojis
@staticmethod
def _valid_emoji_page(soup):
"""
(soup) -> bool
"""
_type = soup.find('meta', {'property': 'og:type'})
return (_type and _type['content'] == 'article')
@staticmethod
def _get_page(query):
response = requests.get('http://emojipedia.org/' + query)
if response.status_code != 200:
raise RuntimeError('Could not get emojipedia page for \'{0}\''
.format(query))
return BeautifulSoup(response.text, 'html.parser')
@staticmethod
def _get_emoji_page(query):
soup = Emojipedia._get_page(query)
if not Emojipedia._valid_emoji_page(soup):
raise ValueError('Query did not yield a emoji entry')
return soup
| {
"content_hash": "1acaa389141fd43e4789813f353a3120",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 33.10576923076923,
"alnum_prop": 0.5570723206505954,
"repo_name": "benjamincongdon/python-emojipedia",
"id": "5ac9f61f0db86f04d01354d831f2d33b362ba687",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emojipedia/emojipedia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7231"
}
],
"symlink_target": ""
} |
import sqlalchemy as SA
import core.db as db
from core.mail import MailQueue
from core.requesthandler import RequestHandler
import core.util
class DelayRequestServlet(RequestHandler):
def post(self):
if not self.current_user:
return self.send_error(403)
self.requestid = core.util.get_int_arg(self.request, 'id')
update_query = db.push_requests.update().where(SA.and_(
db.push_requests.c.id == self.requestid,
db.push_requests.c.state.in_(('requested', 'pickme')),
)).values({
'state': 'delayed',
})
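        # Detach delayed requests from their pushes: delete the pushcontents
        # rows whose request row is now in the 'delayed' state.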
delete_query = db.push_pushcontents.delete().where(
SA.exists([1], SA.and_(
db.push_pushcontents.c.request == db.push_requests.c.id,
db.push_requests.c.state == 'delayed',
)))
select_query = db.push_requests.select().where(
db.push_requests.c.id == self.requestid,
)
db.execute_transaction_cb([update_query, delete_query, select_query], self.on_db_complete)
get = post
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
_, _, req = db_results
req = req.first()
if req['state'] != 'delayed':
# We didn't actually discard the record, for whatever reason
return self.redirect("/requests?user=%s" % self.current_user)
if req['watchers']:
user_string = '%s (%s)' % (req['user'], req['watchers'])
users = [req['user']] + req['watchers'].split(',')
else:
user_string = req['user']
users = [req['user']]
msg = (
"""
<p>
Request for %(user)s has been marked as delayed by %(pushmaster)s, and will not be accepted into pushes until you
mark it as requested again:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Regards,<br />
PushManager
</p>"""
) % core.util.EscapedDict({
'pushmaster': self.current_user,
'user': user_string,
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
})
subject = "[push] %s - %s" % (user_string, req['title'])
MailQueue.enqueue_user_email(users, msg, subject)
self.redirect("/requests?user=%s" % self.current_user)
| {
"content_hash": "b49fed1a0b6a70300c58a238ffdc993b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 129,
"avg_line_length": 35.888888888888886,
"alnum_prop": 0.5193498452012384,
"repo_name": "pombredanne/pushmanager",
"id": "fc96c23145f6f3ec57eaf17841be112f594cc4d3",
"size": "2584",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "servlets/delayrequest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from os import path
import pytest
from docker.errors import APIError
from six import StringIO
from six import text_type
from .. import mock
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import pull_busybox
from compose import __version__
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.project import OneOffFilter
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import NetworkMode
from compose.service import Service
from compose.service import ServiceError
from tests.integration.testcases import v2_only
def create_and_start_container(service, **override_options):
container = service.create_container(**override_options)
return service.start_container(container)
class ServiceTest(DockerClientTestCase):
def test_containers(self):
foo = self.create_service('foo')
bar = self.create_service('bar')
create_and_start_container(foo)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
self.assertEqual(len(bar.containers()), 0)
create_and_start_container(bar)
create_and_start_container(bar)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(len(bar.containers()), 2)
names = [c.name for c in bar.containers()]
self.assertIn('composetest_bar_1', names)
self.assertIn('composetest_bar_2', names)
def test_containers_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(db.containers(stopped=True), [])
self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container])
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
self.assertEqual(service.containers()[0].name, 'composetest_web_1')
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
container = service.create_container()
service.start_container(container)
assert container.get_mount('/var/db')
def test_create_container_with_volume_driver(self):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_create_container_with_cpu_quota(self):
service = self.create_service('db', cpu_quota=40000)
container = service.create_container()
container.start()
self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
def test_create_container_with_shm_size(self):
self.require_api_version('1.22')
service = self.create_service('db', shm_size=67108864)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
def test_create_container_with_extra_hosts_dicts(self):
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
def test_create_container_with_read_only_root_fs(self):
read_only = True
service = self.create_service('db', read_only=read_only)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.ReadonlyRootfs') == read_only
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
def test_create_container_with_specified_volume(self):
host_path = '/tmp/host-path'
container_path = '/container-path'
service = self.create_service(
'db',
volumes=[VolumeSpec(host_path, container_path, 'rw')])
container = service.create_container()
service.start_container(container)
assert container.get_mount(container_path)
# Match the last component ("host-path"), because boot2docker symlinks /tmp
actual_host_path = container.get_mount(container_path)['Source']
self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
def test_recreate_preserves_volume_with_trailing_slash(self):
"""When the Compose file specifies a trailing slash in the container path, make
sure we copy the volume over when recreating.
"""
service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')])
old_container = create_and_start_container(service)
volume_path = old_container.get_mount('/data')['Source']
new_container = service.recreate_container(old_container)
self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
def test_duplicate_volume_trailing_slash(self):
"""
When an image specifies a volume, and the Compose file specifies a host path
but adds a trailing slash, make sure that we don't create duplicate binds.
"""
host_path = '/tmp/data'
container_path = '/data'
volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))]
tmp_container = self.client.create_container(
'busybox', 'true',
volumes={container_path: {}},
labels={'com.docker.compose.test_image': 'true'},
)
image = self.client.commit(tmp_container)['Id']
service = self.create_service('db', image=image, volumes=volumes)
old_container = create_and_start_container(service)
self.assertEqual(
old_container.get('Config.Volumes'),
{container_path: {}},
)
service = self.create_service('db', image=image, volumes=volumes)
new_container = service.recreate_container(old_container)
self.assertEqual(
new_container.get('Config.Volumes'),
{container_path: {}},
)
self.assertEqual(service.containers(stopped=False), [new_container])
def test_create_container_with_volumes_from(self):
volume_service = self.create_service('data')
volume_container_1 = volume_service.create_container()
volume_container_2 = Container.create(
self.client,
image='busybox:latest',
command=["top"],
labels={LABEL_PROJECT: 'composetest'},
)
host_service = self.create_service(
'host',
volumes_from=[
VolumeFromSpec(volume_service, 'rw', 'service'),
VolumeFromSpec(volume_container_2, 'rw', 'container')
]
)
host_container = host_service.create_container()
host_service.start_container(host_container)
self.assertIn(volume_container_1.id + ':rw',
host_container.get('HostConfig.VolumesFrom'))
self.assertIn(volume_container_2.id + ':rw',
host_container.get('HostConfig.VolumesFrom'))
def test_execute_convergence_plan_recreate(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=[VolumeSpec.parse('/etc')],
entrypoint=['top'],
command=['-d', '1']
)
old_container = service.create_container()
self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=1', old_container.get('Config.Env'))
self.assertEqual(old_container.name, 'composetest_db_1')
service.start_container(old_container)
old_container.inspect() # reload volume data
volume_path = old_container.get_mount('/etc')['Source']
num_containers_before = len(self.client.containers(all=True))
service.options['environment']['FOO'] = '2'
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=2', new_container.get('Config.Env'))
self.assertEqual(new_container.name, 'composetest_db_1')
self.assertEqual(new_container.get_mount('/etc')['Source'], volume_path)
self.assertIn(
'affinity:container==%s' % old_container.id,
new_container.get('Config.Env'))
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
self.assertNotEqual(old_container.id, new_container.id)
self.assertRaises(APIError,
self.client.inspect_container,
old_container.id)
def test_execute_convergence_plan_recreate_twice(self):
service = self.create_service(
'db',
volumes=[VolumeSpec.parse('/etc')],
entrypoint=['top'],
command=['-d', '1'])
orig_container = service.create_container()
service.start_container(orig_container)
orig_container.inspect() # reload volume data
volume_path = orig_container.get_mount('/etc')['Source']
# Do this twice to reproduce the bug
for _ in range(2):
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [orig_container]))
assert new_container.get_mount('/etc')['Source'] == volume_path
assert ('affinity:container==%s' % orig_container.id in
new_container.get('Config.Env'))
orig_container = new_container
def test_execute_convergence_plan_when_containers_are_stopped(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=[VolumeSpec.parse('/var/db')],
entrypoint=['top'],
command=['-d', '1']
)
service.create_container()
containers = service.containers(stopped=True)
self.assertEqual(len(containers), 1)
container, = containers
self.assertFalse(container.is_running)
service.execute_convergence_plan(ConvergencePlan('start', [container]))
containers = service.containers()
self.assertEqual(len(containers), 1)
container.inspect()
self.assertEqual(container, containers[0])
self.assertTrue(container.is_running)
def test_execute_convergence_plan_with_image_declared_volume(self):
service = Service(
project='composetest',
name='db',
client=self.client,
build={'context': 'tests/fixtures/dockerfile-with-volume'},
)
old_container = create_and_start_container(service)
self.assertEqual(
[mount['Destination'] for mount in old_container.get('Mounts')], ['/data']
)
volume_path = old_container.get_mount('/data')['Source']
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(
[mount['Destination'] for mount in new_container.get('Mounts')],
['/data']
)
self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
def test_execute_convergence_plan_when_image_volume_masks_config(self):
service = self.create_service(
'db',
build={'context': 'tests/fixtures/dockerfile-with-volume'},
)
old_container = create_and_start_container(service)
self.assertEqual(
[mount['Destination'] for mount in old_container.get('Mounts')],
['/data']
)
volume_path = old_container.get_mount('/data')['Source']
service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]
with mock.patch('compose.service.log') as mock_log:
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
mock_log.warn.assert_called_once_with(mock.ANY)
_, args, kwargs = mock_log.warn.mock_calls[0]
self.assertIn(
"Service \"db\" is using volume \"/data\" from the previous container",
args[0])
self.assertEqual(
[mount['Destination'] for mount in new_container.get('Mounts')],
['/data']
)
self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
def test_execute_convergence_plan_when_host_volume_is_removed(self):
host_path = '/tmp/host-path'
service = self.create_service(
'db',
build={'context': 'tests/fixtures/dockerfile-with-volume'},
volumes=[VolumeSpec(host_path, '/data', 'rw')])
old_container = create_and_start_container(service)
assert (
[mount['Destination'] for mount in old_container.get('Mounts')] ==
['/data']
)
service.options['volumes'] = []
with mock.patch('compose.service.log', autospec=True) as mock_log:
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
assert not mock_log.warn.called
assert (
[mount['Destination'] for mount in new_container.get('Mounts')] ==
['/data']
)
assert new_container.get_mount('/data')['Source'] != host_path
def test_execute_convergence_plan_without_start(self):
service = self.create_service(
'db',
build={'context': 'tests/fixtures/dockerfile-with-volume'}
)
containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
containers = service.execute_convergence_plan(
ConvergencePlan('recreate', containers),
start=False)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.execute_convergence_plan(ConvergencePlan('start', containers), start=False)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_inherits_options_from_constructor(self):
db = self.create_service('db', environment={'FOO': 'BAR'})
create_and_start_container(db)
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_creates_links(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, None)])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(get_links(web.containers()[0])),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_creates_links_with_names(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'custom_link_name')])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(get_links(web.containers()[0])),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'custom_link_name'])
)
def test_start_container_with_external_links(self):
db = self.create_service('db')
web = self.create_service('web', external_links=['composetest_db_1',
'composetest_db_2',
'composetest_db_3:db_3'])
for _ in range(3):
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(get_links(web.containers()[0])),
set([
'composetest_db_1',
'composetest_db_2',
'db_3']),
)
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db)
self.assertEqual(set(get_links(c)), set([]))
def test_start_one_off_container_creates_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db, one_off=OneOffFilter.only)
self.assertEqual(
set(get_links(c)),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_builds_images(self):
service = Service(
name='test',
client=self.client,
build={'context': 'tests/fixtures/simple-dockerfile'},
project='composetest',
)
container = create_and_start_container(service)
container.wait()
self.assertIn(b'success', container.logs())
self.assertEqual(len(self.client.images(name='composetest_test')), 1)
def test_start_container_uses_tagged_image_if_it_exists(self):
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
service = Service(
name='test',
client=self.client,
build={'context': 'this/does/not/exist/and/will/throw/error'},
project='composetest',
)
container = create_and_start_container(service)
container.wait()
self.assertIn(b'success', container.logs())
def test_start_container_creates_ports(self):
service = self.create_service('web', ports=[8000])
container = create_and_start_container(service).inspect()
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
def test_build(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
self.create_service('web', build={'context': base_dir}).build()
assert self.client.inspect_image('composetest_web')
def test_build_non_ascii_filename(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
f.write("hello world\n")
self.create_service('web', build={'context': text_type(base_dir)}).build()
assert self.client.inspect_image('composetest_web')
def test_build_with_image_name(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
image_name = 'examples/composetest:latest'
self.addCleanup(self.client.remove_image, image_name)
self.create_service('web', build={'context': base_dir}, image=image_name).build()
assert self.client.inspect_image(image_name)
def test_build_with_git_url(self):
build_url = "https://github.com/dnephin/docker-build-from-url.git"
service = self.create_service('buildwithurl', build={'context': build_url})
self.addCleanup(self.client.remove_image, service.image_name)
service.build()
assert service.image()
def test_build_with_build_args(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
f.write("ARG build_version\n")
service = self.create_service('buildwithargs',
build={'context': text_type(base_dir),
'args': {"build_version": "1"}})
service.build()
assert service.image()
def test_start_container_stays_unprivileged(self):
service = self.create_service('web')
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], False)
def test_start_container_becomes_privileged(self):
service = self.create_service('web', privileged=True)
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], True)
def test_expose_does_not_publish_ports(self):
service = self.create_service('web', expose=["8000"])
container = create_and_start_container(service).inspect()
self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
def test_start_container_creates_port_with_explicit_protocol(self):
service = self.create_service('web', ports=['8000/udp'])
container = create_and_start_container(service).inspect()
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
def test_start_container_creates_fixed_external_ports(self):
service = self.create_service('web', ports=['8000:8000'])
container = create_and_start_container(service).inspect()
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
service = self.create_service('web', ports=['8001:8000'])
container = create_and_start_container(service).inspect()
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
def test_port_with_explicit_interface(self):
service = self.create_service('web', ports=[
'127.0.0.1:8001:8000',
'0.0.0.0:9001:9000/udp',
])
container = create_and_start_container(service).inspect()
self.assertEqual(container['NetworkSettings']['Ports'], {
'8000/tcp': [
{
'HostIp': '127.0.0.1',
'HostPort': '8001',
},
],
'9000/udp': [
{
'HostIp': '0.0.0.0',
'HostPort': '9001',
},
],
})
def test_create_with_image_id(self):
# Get image id for the current busybox:latest
pull_busybox(self.client)
image_id = self.client.inspect_image('busybox:latest')['Id'][:12]
service = self.create_service('foo', image=image_id)
service.create_container()
def test_scale(self):
service = self.create_service('web')
service.scale(1)
self.assertEqual(len(service.containers()), 1)
# Ensure containers don't have stdout or stdin connected
container = service.containers()[0]
config = container.inspect()['Config']
self.assertFalse(config['AttachStderr'])
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
service.scale(3)
self.assertEqual(len(service.containers()), 3)
service.scale(1)
self.assertEqual(len(service.containers()), 1)
service.scale(0)
self.assertEqual(len(service.containers()), 0)
def test_scale_with_stopped_containers(self):
"""
Given there are some stopped containers and scale is called with a
desired number that is the same as the number of stopped containers,
test that those containers are restarted and not removed/recreated.
"""
service = self.create_service('web')
next_number = service._next_container_number()
valid_numbers = [next_number, next_number + 1]
service.create_container(number=next_number)
service.create_container(number=next_number + 1)
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
service.scale(2)
for container in service.containers():
self.assertTrue(container.is_running)
self.assertTrue(container.number in valid_numbers)
captured_output = mock_stderr.getvalue()
self.assertNotIn('Creating', captured_output)
self.assertIn('Starting', captured_output)
def test_scale_with_stopped_containers_and_needing_creation(self):
"""
Given there are some stopped containers and scale is called with a
desired number that is greater than the number of stopped containers,
        test that those containers are restarted and the required number are created.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
for container in service.containers():
self.assertFalse(container.is_running)
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
service.scale(2)
self.assertEqual(len(service.containers()), 2)
for container in service.containers():
self.assertTrue(container.is_running)
captured_output = mock_stderr.getvalue()
self.assertIn('Creating', captured_output)
self.assertIn('Starting', captured_output)
def test_scale_with_api_error(self):
"""Test that when scaling if the API returns an error, that error is handled
and the remaining threads continue.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
with mock.patch(
'compose.container.Container.create',
side_effect=APIError(
message="testing",
response={},
explanation="Boom")):
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
with self.assertRaises(ServiceError):
service.scale(3)
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.assertIn(
"ERROR: for composetest_web_2 Cannot create container for service web: Boom",
mock_stderr.getvalue()
)
def test_scale_with_unexpected_exception(self):
"""Test that when scaling if the API returns an error, that is not of type
APIError, that error is re-raised.
"""
service = self.create_service('web')
next_number = service._next_container_number()
service.create_container(number=next_number, quiet=True)
with mock.patch(
'compose.container.Container.create',
side_effect=ValueError("BOOM")
):
with self.assertRaises(ValueError):
service.scale(3)
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
@mock.patch('compose.service.log')
def test_scale_with_desired_number_already_achieved(self, mock_log):
"""
Test that calling scale with a desired number that is equal to the
number of containers already running results in no change.
"""
service = self.create_service('web')
next_number = service._next_container_number()
container = service.create_container(number=next_number, quiet=True)
container.start()
container.inspect()
assert container.is_running
assert len(service.containers()) == 1
service.scale(1)
assert len(service.containers()) == 1
container.inspect()
assert container.is_running
captured_output = mock_log.info.call_args[0]
assert 'Desired container number already achieved' in captured_output
@mock.patch('compose.service.log')
def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
"""Test that calling scale on a service that has a custom container name
results in warning output.
"""
service = self.create_service('app', container_name='custom-container')
self.assertEqual(service.custom_container_name, 'custom-container')
with self.assertRaises(ServiceError):
service.scale(3)
captured_output = mock_log.warn.call_args[0][0]
self.assertEqual(len(service.containers()), 1)
self.assertIn(
"Remove the custom name to scale the service.",
captured_output
)
def test_scale_sets_ports(self):
service = self.create_service('web', ports=['8000'])
service.scale(2)
containers = service.containers()
self.assertEqual(len(containers), 2)
for container in containers:
self.assertEqual(
list(container.get('HostConfig.PortBindings')),
['8000/tcp'])
def test_scale_with_immediate_exit(self):
service = self.create_service('web', image='busybox', command='true')
service.scale(2)
assert len(service.containers(stopped=True)) == 2
def test_network_mode_none(self):
service = self.create_service('web', network_mode=NetworkMode('none'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
def test_network_mode_bridged(self):
service = self.create_service('web', network_mode=NetworkMode('bridge'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
def test_network_mode_host(self):
service = self.create_service('web', network_mode=NetworkMode('host'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
def test_pid_mode_none_defined(self):
service = self.create_service('web', pid=None)
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), '')
def test_pid_mode_host(self):
service = self.create_service('web', pid='host')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), 'host')
def test_dns_no_value(self):
service = self.create_service('web')
container = create_and_start_container(service)
self.assertIsNone(container.get('HostConfig.Dns'))
def test_dns_list(self):
service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
def test_mem_swappiness(self):
service = self.create_service('web', mem_swappiness=11)
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.MemorySwappiness'), 11)
def test_restart_always_value(self):
service = self.create_service('web', restart={'Name': 'always'})
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
def test_oom_score_adj_value(self):
service = self.create_service('web', oom_score_adj=500)
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.OomScoreAdj'), 500)
def test_restart_on_failure_value(self):
service = self.create_service('web', restart={
'Name': 'on-failure',
'MaximumRetryCount': 5
})
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
def test_cap_add_list(self):
service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])
def test_cap_drop_list(self):
service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
def test_dns_search(self):
service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
@v2_only()
def test_tmpfs(self):
service = self.create_service('web', tmpfs=['/run'])
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Tmpfs'), {'/run': ''})
def test_working_dir_param(self):
service = self.create_service('container', working_dir='/working/dir/sample')
container = service.create_container()
self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
def test_split_env(self):
service = self.create_service(
'web',
environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
env = create_and_start_container(service).environment
for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
self.assertEqual(env[k], v)
def test_env_from_file_combined_with_env(self):
service = self.create_service(
'web',
environment=['ONE=1', 'TWO=2', 'THREE=3'],
env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
env = create_and_start_container(service).environment
for k, v in {
'ONE': '1',
'TWO': '2',
'THREE': '3',
'FOO': 'baz',
'DOO': 'dah'
}.items():
self.assertEqual(env[k], v)
@mock.patch.dict(os.environ)
def test_resolve_env(self):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
service = self.create_service(
'web',
environment={
'FILE_DEF': 'F1',
'FILE_DEF_EMPTY': '',
'ENV_DEF': None,
'NO_DEF': None
}
)
env = create_and_start_container(service).environment
for k, v in {
'FILE_DEF': 'F1',
'FILE_DEF_EMPTY': '',
'ENV_DEF': 'E3',
'NO_DEF': None
}.items():
self.assertEqual(env[k], v)
def test_with_high_enough_api_version_we_get_default_network_mode(self):
# TODO: remove this test once minimum docker version is 1.8.x
with mock.patch.object(self.client, '_version', '1.20'):
service = self.create_service('web')
service_config = service._get_container_host_config({})
self.assertEquals(service_config['NetworkMode'], 'default')
def test_labels(self):
labels_dict = {
'com.example.description': "Accounting webapp",
'com.example.department': "Finance",
'com.example.label-with-empty-value': "",
}
compose_labels = {
LABEL_CONTAINER_NUMBER: '1',
LABEL_ONE_OFF: 'False',
LABEL_PROJECT: 'composetest',
LABEL_SERVICE: 'web',
LABEL_VERSION: __version__,
}
expected = dict(labels_dict, **compose_labels)
service = self.create_service('web', labels=labels_dict)
labels = create_and_start_container(service).labels.items()
for pair in expected.items():
self.assertIn(pair, labels)
def test_empty_labels(self):
labels_dict = {'foo': '', 'bar': ''}
service = self.create_service('web', labels=labels_dict)
labels = create_and_start_container(service).labels.items()
for name in labels_dict:
self.assertIn((name, ''), labels)
def test_stop_signal(self):
stop_signal = 'SIGINT'
service = self.create_service('web', stop_signal=stop_signal)
container = create_and_start_container(service)
self.assertEqual(container.stop_signal, stop_signal)
def test_custom_container_name(self):
service = self.create_service('web', container_name='my-web-container')
self.assertEqual(service.custom_container_name, 'my-web-container')
container = create_and_start_container(service)
self.assertEqual(container.name, 'my-web-container')
one_off_container = service.create_container(one_off=True)
self.assertNotEqual(one_off_container.name, 'my-web-container')
@pytest.mark.skipif(True, reason="Broken on 1.11.0rc1")
def test_log_drive_invalid(self):
service = self.create_service('web', logging={'driver': 'xxx'})
expected_error_msg = "logger: no log driver named 'xxx' is registered"
with self.assertRaisesRegexp(APIError, expected_error_msg):
create_and_start_container(service)
def test_log_drive_empty_default_jsonfile(self):
service = self.create_service('web')
log_config = create_and_start_container(service).log_config
self.assertEqual('json-file', log_config['Type'])
self.assertFalse(log_config['Config'])
def test_log_drive_none(self):
service = self.create_service('web', logging={'driver': 'none'})
log_config = create_and_start_container(service).log_config
self.assertEqual('none', log_config['Type'])
self.assertFalse(log_config['Config'])
def test_devices(self):
service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
device_config = create_and_start_container(service).get('HostConfig.Devices')
device_dict = {
'PathOnHost': '/dev/random',
'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/mapped-random'
}
self.assertEqual(1, len(device_config))
self.assertDictEqual(device_dict, device_config[0])
def test_duplicate_containers(self):
service = self.create_service('web')
options = service._get_container_create_options({}, 1)
original = Container.create(service.client, **options)
self.assertEqual(set(service.containers(stopped=True)), set([original]))
self.assertEqual(set(service.duplicate_containers()), set())
options['name'] = 'temporary_container_name'
duplicate = Container.create(service.client, **options)
self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
def converge(service, strategy=ConvergenceStrategy.changed):
"""Create a converge plan from a strategy and execute the plan."""
plan = service.convergence_plan(strategy)
return service.execute_convergence_plan(plan, timeout=1)
class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
self.assertIn('foo', container.labels)
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', command=["top", "-d", "1"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
| {
"content_hash": "b49d03bbc0368054acc277fb95d49d46",
"timestamp": "",
"source": "github",
"line_count": 1086,
"max_line_length": 103,
"avg_line_length": 40.79189686924494,
"alnum_prop": 0.6220090293453725,
"repo_name": "andrewgee/compose",
"id": "c1be6813055c978e253600ea24bba0504c0655ad",
"size": "44300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/service_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2600"
},
{
"name": "Python",
"bytes": "628075"
},
{
"name": "Shell",
"bytes": "25585"
}
],
"symlink_target": ""
} |
from .grid_plotly import plotly_grid
from .grid_qgrid import qgrid_grid
from .grid_psp import psp_grid
from .grid_phosphor import phosphor_grid
from .grid_ipysheet import ipysheet_grid
from .grid_lineup import lineup_grid
_BACKENDS = ['plotly', 'qgrid', 'psp', 'phosphor', 'ipysheet', 'lineup']
def _backend_to_grid_foo(backend, theme=None):
if backend == 'plotly' or backend == 'cufflinks':
return plotly_grid
if backend == 'qgrid':
return qgrid_grid
if backend == 'psp':
return psp_grid
if backend == 'phosphor':
return phosphor_grid
if backend == 'ipysheet':
return ipysheet_grid
if backend == 'lineup':
return lineup_grid
raise NotImplementedError()
def grid(data, backend='psp', **kwargs):
if backend not in _BACKENDS:
raise Exception('Must pick backend in %s' % _BACKENDS)
return _backend_to_grid_foo(backend)(data, **kwargs)
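# Minimal usage sketch (not part of the original module; assumes pandas and the
# qgrid backend are installed -- the DataFrame below is purely illustrative):
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
#   widget = grid(df, backend='qgrid')   # dispatches to qgrid_grid(df, **kwargs)
#
# Passing a backend that is not listed in _BACKENDS raises the Exception above
# before any dispatch happens.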
| {
"content_hash": "f01c48db043e1957c973c22dd3f098c2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 30,
"alnum_prop": 0.6612903225806451,
"repo_name": "timkpaine/lantern",
"id": "9fd5f06377eec67c4e962eefd8875c9ca81b0326",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lantern/grids/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2428"
},
{
"name": "JavaScript",
"bytes": "1323"
},
{
"name": "Jupyter Notebook",
"bytes": "22181"
},
{
"name": "Makefile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "183613"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import webapp.apps.taxbrain.models
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0090_auto_20150315_0612'),
]
operations = [
migrations.AlterField(
model_name='taxsaveinputs',
name='ACTC_ChildNum',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='AMED_trt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='AMT_prt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='AMT_trt1',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='AMT_trt2',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='CTC_prt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='FICA_trt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_Casualty_frt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_Charity_crt_Asset',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_Charity_crt_Cash',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_Miscellaneous_frt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_crt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_medical_frt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='taxsaveinputs',
name='ID_prt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
]
| {
"content_hash": "0cf192e75871544fe8c940205de7b3ab",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 119,
"avg_line_length": 41.142857142857146,
"alnum_prop": 0.6096230158730159,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "59d8991ea9f7c79c4e672d1ca8a1215d8a6b1d94",
"size": "4056",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0091_auto_20150317_1820.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
import inspect
from librato_python_web.instrumentor.instrument import get_complex_wrapper
from librato_python_web.instrumentor.objproxies import ObjectWrapper
from librato_python_web.instrumentor.base_instrumentor import BaseInstrumentor
class WrappedCursor(ObjectWrapper):
""" Wraps native cursor class to permit instrumentation """
""" Native class methods can't be instrumented in-place """
def callproc(self, *args, **kwargs):
return self.__subject__.callproc(*args, **kwargs)
def execute(self, *args, **kwargs):
return self.__subject__.execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
return self.__subject__.executemany(*args, **kwargs)
def fetchone(self, *args, **kwargs):
return self.__subject__.fetchone(*args, **kwargs)
def fetchmany(self, *args, **kwargs):
return self.__subject__.fetchmany(*args, **kwargs)
def fetchall(self, *args, **kwargs):
return self.__subject__.fetchall(*args, **kwargs)
def nextset(self, *args, **kwargs):
return self.__subject__.nextset(*args, **kwargs)
class ConnWrapper(ObjectWrapper):
""" Wraps native connection class to permit instrumentation """
def cursor(self):
cursor = self.__subject__.cursor()
return WrappedCursor(cursor)
def wrapped_connect(func, *args, **kwargs):
""" Returns a wrapped connection which intercepts cursor() """
conn = func(*args, **kwargs)
return ConnWrapper(conn)
class Psycopg2Instrumentor(BaseInstrumentor):
modules = {'psycopg2': ['connect'], 'psycopg2.extensions': ['connection', 'cursor']}
def __init__(self):
super(Psycopg2Instrumentor, self).__init__()
def run(self):
""" Instruments our cursor wrapper class and psycopg2.connect """
# Generate a list of methods in the cursor wrapper
meth_names = [n for (n, _) in inspect.getmembers(WrappedCursor) if '_' not in n]
meths = {
'librato_python_web.instrumentor.data.psycopg2.WrappedCursor.' + m:
get_complex_wrapper('data.psycopg2.%s.' % m, state='data.psycopg2', disable_if='model')
for m in meth_names
}
# Instrument connect method
meths['psycopg2.connect'] = wrapped_connect
self.set_wrapped(meths)
super(Psycopg2Instrumentor, self).run()
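# Rough flow once run() has patched the targets above (a sketch of intent, not
# executed here; it assumes set_wrapped() swaps the listed targets for their
# wrappers, as the call above suggests):
#
#   conn = psycopg2.connect(dsn)   # goes through wrapped_connect -> ConnWrapper
#   cur = conn.cursor()            # ConnWrapper.cursor() returns a WrappedCursor
#   cur.execute("SELECT 1")        # WrappedCursor.execute() is now interceptable
#
# Wrapping the cursor in pure Python is what makes the native psycopg2 cursor
# methods instrumentable, since C-extension methods cannot be patched in place.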
| {
"content_hash": "99cf341add9857bfae1d75a6fc742650",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 34.779411764705884,
"alnum_prop": 0.6536997885835095,
"repo_name": "librato/librato-python-web",
"id": "e18ad93a68e5ecd27e435a1188b317ff490d3c32",
"size": "3906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "librato_python_web/instrumentor/data/psycopg2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "278741"
}
],
"symlink_target": ""
} |
'''
Calculate the coverage of each genome pair from the blastx results
Starting with the blastx results converted to NC/NC ids, we want to calculate
the coverage at each position in every genome.
We will consider the genomes with the greatest number of covered bases in the
phage to be the top genomes.
'''
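# Assumed input format (inferred from the field indices used below, not stated
# explicitly in the original): standard tabular blast output where column 1 is
# the phage query id, column 2 is the bacterial subject id, and columns 7-8 are
# the query start/end coordinates of the alignment.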
import sys
from phage import Phage
phage=Phage()
try:
f=sys.argv[1]
except:
    sys.exit(sys.argv[0] + " <blast output file converted to NC/NC format. Probably phage.genomes.blastx>")
count={}
lens=phage.phageSequenceLengths()
bctG = set(phage.completeBacteriaIDs())
phgG = set(phage.phageIDs())
for p in phgG:
count[p]={}
sys.stderr.write("Reading " + f + "\n")
with open(f, 'r') as bin:
for l in bin:
p=l.strip().split("\t")
if p[0] not in phgG:
continue
if p[1] not in bctG:
continue
if p[1] not in count[p[0]]:
count[p[0]][p[1]]=[]
for i in range(lens[p[0]]+1):
count[p[0]][p[1]].append(0)
s = int(p[6])
e = int(p[7])
if e < s:
(s,e)=(e,s)
for i in range(s,e+1):
count[p[0]][p[1]][i]=1
sys.stderr.write("Found " + str(len(count)) + ' matches\n')
for p in count:
tot=0
genomes=[]
for b in count[p]:
c = sum(count[p][b])
if c > tot:
tot = c
genomes = [b]
elif c == tot:
genomes.append(b)
print(p + "\t" + "\t".join(genomes))
sys.stderr.write("Done")
| {
"content_hash": "3e34737fb840dd90b4baa17c07cb93d3",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 106,
"avg_line_length": 23.296875,
"alnum_prop": 0.5546613011401744,
"repo_name": "linsalrob/PhageHosts",
"id": "166fae47c729a9129f2bcee0edf071baba415d19",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/blastx_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8127"
},
{
"name": "Perl",
"bytes": "36107"
},
{
"name": "Perl 6",
"bytes": "453"
},
{
"name": "Python",
"bytes": "184154"
}
],
"symlink_target": ""
} |
from .error_response import ErrorResponse, ErrorResponseException
from .operation_display import OperationDisplay
from .operation import Operation
from .resource import Resource
from .tags_resource import TagsResource
from .application_insights_component import ApplicationInsightsComponent
from .web_test_geolocation import WebTestGeolocation
from .web_test_properties_configuration import WebTestPropertiesConfiguration
from .web_test import WebTest
from .application_insights_component_export_request import ApplicationInsightsComponentExportRequest
from .application_insights_component_export_configuration import ApplicationInsightsComponentExportConfiguration
from .application_insights_component_data_volume_cap import ApplicationInsightsComponentDataVolumeCap
from .application_insights_component_billing_features import ApplicationInsightsComponentBillingFeatures
from .application_insights_component_quota_status import ApplicationInsightsComponentQuotaStatus
from .api_key_request import APIKeyRequest
from .application_insights_component_api_key import ApplicationInsightsComponentAPIKey
from .operation_paged import OperationPaged
from .application_insights_component_paged import ApplicationInsightsComponentPaged
from .web_test_paged import WebTestPaged
from .application_insights_component_api_key_paged import ApplicationInsightsComponentAPIKeyPaged
from .application_insights_management_client_enums import (
ApplicationType,
FlowType,
RequestSource,
WebTestKind,
)
__all__ = [
'ErrorResponse', 'ErrorResponseException',
'OperationDisplay',
'Operation',
'Resource',
'TagsResource',
'ApplicationInsightsComponent',
'WebTestGeolocation',
'WebTestPropertiesConfiguration',
'WebTest',
'ApplicationInsightsComponentExportRequest',
'ApplicationInsightsComponentExportConfiguration',
'ApplicationInsightsComponentDataVolumeCap',
'ApplicationInsightsComponentBillingFeatures',
'ApplicationInsightsComponentQuotaStatus',
'APIKeyRequest',
'ApplicationInsightsComponentAPIKey',
'OperationPaged',
'ApplicationInsightsComponentPaged',
'WebTestPaged',
'ApplicationInsightsComponentAPIKeyPaged',
'ApplicationType',
'FlowType',
'RequestSource',
'WebTestKind',
]
| {
"content_hash": "dc7aca1d71d50fd4e30f3dc59f42e370",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 112,
"avg_line_length": 42.9622641509434,
"alnum_prop": 0.826526130873957,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "8318a9567e80ecb3caa9f15f9b5cf231475e7044",
"size": "2751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""
.. code-block:: javascript
import { Modal } from 'react-native'
A Layer Group can be tagged as **Modal**. The child layers will
constitute the content of the **Modal**.
"""
from .component import Component
from .view import View
from .commons import StyleSheet
from ..props import Props, PropTypes
class Modal(Component):
def __init__(self, props=None, parent=None, layer=None):
super(self.__class__, self).__init__(parent=parent, layer=layer)
self.name = 'Modal'
self.path = 'react-native'
self.is_default = False
self.props = Props({
'animationType': PropTypes.oneOf(['none', 'slide', 'fade']),
'presentationStyle': PropTypes.oneOf(
['fullScreen', 'pageSheet', 'formSheet', 'overFullScreen']),
'transparent': PropTypes.bool,
'hardwareAccelerated': PropTypes.bool,
'visible': PropTypes.bool,
'onRequestClose': PropTypes.func.isRequired,
'onShow': PropTypes.func,
'supportedOrientations': PropTypes.arrayOf(PropTypes.oneOf(
['portrait', 'portrait-upside-down', 'landscape',
'landscape-left', 'landscape-right'])),
'onOrientationChange': PropTypes.func,
})
self.update_props(props)
self.is_self_closing = False
@classmethod
def create_component(cls, sketch_layer, parent=None):
if sketch_layer.component:
props = sketch_layer.component.get_react_native_props()
else:
props = dict()
component = Modal(props, parent=parent, layer=sketch_layer)
component.set_position(sketch_layer.get_position())
        # Modal doesn't have style props so adding an intermediate view
# component
child_view_props = {
'style': StyleSheet(sketch_layer.get_css_view_styles(),
name=sketch_layer.name)}
child_view_component = View(child_view_props, parent=component,
layer=sketch_layer)
component.add_child(child_view_component)
for layer in sketch_layer.layers:
child = layer.get_react_component(parent=child_view_component)
if child:
child_view_component.add_child(child)
return component
| {
"content_hash": "461a1ae38870a3eac4f1d6ba8df2023a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 39.610169491525426,
"alnum_prop": 0.6033376123234917,
"repo_name": "ibhubs/sketch-components",
"id": "8649f198cf222c838b6ffecfc564b11b4bc6347b",
"size": "2337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sketch_components/engines/react/native/components/modal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "101982"
},
{
"name": "Makefile",
"bytes": "2309"
},
{
"name": "Mako",
"bytes": "13124"
},
{
"name": "Python",
"bytes": "298639"
}
],
"symlink_target": ""
} |
from .constants import AGNOCOMPLETE_USER_ATTRIBUTE
class UserContextFormMixin(object):
"""
Form Mixin that passes the user context to its fields.
This property takes the name of the ``AGNOCOMPLETE_USER_ATTRIBUTE``
constant value, to avoid conflicting with any other field property.
This value will be accessed at validation time, and may only concern
autocomplete fields that are using context-based querysets
(e.g.: :class:`AgnocompleteModelField`).
"""
def __init__(self, user, *args, **kwargs):
self.user = user
super(UserContextFormMixin, self).__init__(*args, **kwargs)
if self.user:
for field in self.fields.values():
setattr(field, AGNOCOMPLETE_USER_ATTRIBUTE, self.user)
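# Minimal usage sketch (the form, field, and imports below are hypothetical and
# not defined in this module):
#
#   class SearchForm(UserContextFormMixin, forms.Form):
#       person = fields.AgnocompleteModelField('AutocompletePerson')
#
#   form = SearchForm(request.user, data=request.POST)
#
# Because __init__ takes the user first, each field ends up carrying
# request.user under AGNOCOMPLETE_USER_ATTRIBUTE, which context-based querysets
# can read at validation time.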
| {
"content_hash": "7a0d035a1b6349c8abc9b315f98ed861",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 38.55,
"alnum_prop": 0.682230869001297,
"repo_name": "novafloss/django-agnocomplete",
"id": "2a47967611efac887db799ce34adb3b743e8e474",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agnocomplete/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "766"
},
{
"name": "Python",
"bytes": "42627"
}
],
"symlink_target": ""
} |
import bookmark_pyparser as bpp
import os
from optparse import OptionParser
usage="""usage: %prog [options] dir_path1 dir_path2
All html files in the given directories will be assumed to be firefox
bookmark.html files.
If no directory is given then the current directory will be used"""
parser = OptionParser(usage=usage)
parser.add_option("-r","--recursive",
action="store_true",dest="recursive", default=False,
help="Recursively explore given directory for bookmark files")
parser.add_option("-o", "--outfile", dest="outfile",default="merged bookmarks.html",
help="write output to FILE [default: %default]", metavar="FILE")
(options, args) = parser.parse_args()
outfile=options.outfile
recursive=options.recursive
if args==[]:
dir_path='.'
else:
dir_path=args
#finds a list (recursively) of all html (bookmark) files in the chosen directory
htmlfiles=[]
for path in dir_path:
if recursive==True:
for root,dirs,files in os.walk(path):
print root
htmlfiles_tmp=[os.path.join(root,fils) for fils in files if fils.split('.')[-1].lower()=='html']
htmlfiles.extend(htmlfiles_tmp)
else:
root=os.path.abspath(path)
files=os.listdir(path)
print root
htmlfiles_tmp=[os.path.join(root,fils) for fils in files if fils.split('.')[-1].lower()=='html']
htmlfiles.extend(htmlfiles_tmp)
print
result={}
numhref=0
for bookmarkfile in htmlfiles:
print '##### parsing ', os.path.relpath(bookmarkfile,path)
parsedfile=bpp.bookmarkshtml.parseFile(file(bookmarkfile))
numhref+=len(bpp.hyperlinks(parsedfile))
print '#### creating a bookmarkDict '
bmDict=bpp.bookmarkDict(parsedfile)
print '#### merging latest file into result'
result=bpp.merge_bookmarkDict(result,bmDict)
finalfile=file(outfile, 'w')
finalstr=bpp.serialize_bookmarkDict(result)
finalfile.write(finalstr)
finalfile.close()
print 'total number of hyperlinks found = ', numhref
print 'number of hyperlinks in final file=', len(bpp.hyperlinks_bookmarkDict(result))
print 'number of unique hyperlinks =', len(set(bpp.hyperlinks_bookmarkDict(result)))
print 'number of folders =', bpp.count_folders(result)
| {
"content_hash": "163515fc886577afc03d7473070cf76b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 108,
"avg_line_length": 35.484848484848484,
"alnum_prop": 0.6673783091374893,
"repo_name": "ElderDelp/elisp-collection",
"id": "18bbf5957aa3c9d785d145230f131fd910c68f52",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marks/example_bookmark_merger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "4211200"
},
{
"name": "HTML",
"bytes": "1312509"
},
{
"name": "Makefile",
"bytes": "7862"
},
{
"name": "Perl",
"bytes": "76494"
},
{
"name": "Python",
"bytes": "22839"
},
{
"name": "Shell",
"bytes": "335899"
},
{
"name": "Visual Basic",
"bytes": "227749"
}
],
"symlink_target": ""
} |