the-stack_106_29164
from tkinter import *
window = Tk()
window.geometry("400x300+20+10")
window.title('The Grid Manager')
class MyWindow:
def __init__(self,window):
self.lbl1 = Entry(window,bd=3,justify="center")
self.lbl1.grid(row=0,column=0,padx=2)
self.lbl1.insert(0,"Standard Calculator")
self.lbl2 = Entry(window,bd=3,justify="center")
self.lbl2.grid(row=1,column=0,padx=2)
self.lbl2.insert(0,"Input 1st Number :")
self.txtfld2 = Entry(window,bd=3)
self.txtfld2.grid(row=1,column=1)
self.lbl3 = Entry(window,bd=3,justify="center")
self.lbl3.grid(row=2,column=0,padx=2)
self.lbl3.insert(0,"Input 2nd Number :")
self.txtfld3=Entry(window,bd=3)
self.txtfld3.grid(row=2,column=1,padx=2)
self.lbl4 = Entry(window,bd=3,justify="center")
self.lbl4.grid(row=3,column=0,padx=2)
self.lbl4.insert(0,"Select Operators :")
self.btn1=Button(window,text="Addition(+)",command=self.add)
self.btn1.grid(row=4,column=0,padx=2)
self.btn2 = Button(window,text="Subtraction(-)")
self.btn2.grid(row=4,column=1,padx=2)
self.btn2.bind('<Button-1>',self.subtract)
self.btn3=Button(window,text="Multiply(*)", command=self.multiply)
self.btn3.grid(row=5,column=0,padx=2)
self.btn4=Button(window,text="Division(/)")
self.btn4.grid(row=5,column=1,padx=2)
self.btn4.bind('<Button-1>',self.division)
self.lbl5=Entry(window,bd=3,justify="center")
self.lbl5.grid(row=7,column=0,padx=2)
self.lbl5.insert(0,"Result :")
self.txtfld4=Entry(window,bd=4)
self.txtfld4.grid(row=7,column=1,padx=2)
def add(self):
self.txtfld4.delete(0,'end')
num1=int(self.txtfld2.get())
num2=int(self.txtfld3.get())
answer = num1+num2
self.txtfld4.insert(END,answer)
def subtract(self,event):
self.txtfld4.delete(0,'end')
num1=int(self.txtfld2.get())
num2=int(self.txtfld3.get())
answer = num1-num2
self.txtfld4.insert(END,answer)
def multiply(self):
self.txtfld4.delete(0,'end')
num1=int(self.txtfld2.get())
num2=int(self.txtfld3.get())
answer = num1*num2
self.txtfld4.insert(END,answer)
def division(self,event):
self.txtfld4.delete(0,'end')
num1=int(self.txtfld2.get())
num2=int(self.txtfld3.get())
answer = num1/num2
self.txtfld4.insert(END,str(answer))
mywin = MyWindow(window)
window.mainloop()
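# --- Editor's note (not part of the original file) ---
# A minimal, hedged sketch of how the four handlers above could share one
# guarded helper so that non-numeric input or division by zero shows a message
# instead of raising. It reuses the widget names defined in MyWindow
# (txtfld2/txtfld3/txtfld4); treat it as an illustration, not a drop-in patch.
def safe_calculate(win, op):
    """Read both entry fields from a MyWindow-like object, apply op, show the result."""
    win.txtfld4.delete(0, 'end')
    try:
        num1 = float(win.txtfld2.get())
        num2 = float(win.txtfld3.get())
        win.txtfld4.insert(END, op(num1, num2))
    except (ValueError, ZeroDivisionError) as exc:
        win.txtfld4.insert(END, 'error: %s' % exc)
# Example: safe_calculate(mywin, lambda a, b: a / b)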
the-stack_106_29165
from uuid import uuid4
from django.db import models
class BaseModelMixin(models.Model):
"""
Abstract base model providing a unique slug plus creation and
modification datetimes: ['slug', 'created', 'updated']
"""
slug = models.SlugField(
max_length=100, unique=True, db_index=True,
default=uuid4, editable=False
)
created = models.DateTimeField(
("creation date and time"),
editable=False,
auto_now_add=True,)
updated = models.DateTimeField(
("update date and time"),
auto_now=True,
editable=False
)
class Meta:
abstract = True
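# --- Editor's note (not part of the original file) ---
# A hedged sketch of a concrete model using the abstract mixin above. "Article"
# and its "title" field are hypothetical names chosen for illustration only, and
# the class would need to live in an installed Django app to be usable.
class Article(BaseModelMixin):
    title = models.CharField(max_length=200)

    def __str__(self):
        return self.title
# Instances then carry the inherited fields automatically:
#   article.slug     -> unique UUID-based slug, set once, not editable
#   article.created  -> set on insert (auto_now_add=True)
#   article.updated  -> refreshed on every save (auto_now=True)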
the-stack_106_29166
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kserve
from kserve.models.v1beta1_explainer_extension_spec import V1beta1ExplainerExtensionSpec # noqa: E501
from kserve.rest import ApiException
class TestV1beta1ExplainerExtensionSpec(unittest.TestCase):
"""V1beta1ExplainerExtensionSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1ExplainerExtensionSpec
include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = kserve.models.v1beta1_explainer_extension_spec.V1beta1ExplainerExtensionSpec() # noqa: E501
if include_optional :
return V1beta1ExplainerExtensionSpec(
args = [
'0'
],
command = [
'0'
],
config = {
'key' : '0'
},
env = [
None
],
env_from = [
None
],
image = '0',
image_pull_policy = '0',
lifecycle = None,
liveness_probe = None,
name = '0',
ports = [
None
],
readiness_probe = None,
resources = None,
runtime_version = '0',
security_context = None,
startup_probe = None,
stdin = True,
stdin_once = True,
storage_uri = '0',
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
volume_devices = [
None
],
volume_mounts = [
None
],
working_dir = '0'
)
else :
return V1beta1ExplainerExtensionSpec(
)
def testV1beta1ExplainerExtensionSpec(self):
"""Test V1beta1ExplainerExtensionSpec"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
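# --- Editor's note (not part of the generated test stub) ---
# A hedged sketch of an extra assertion-style test the stub above could grow
# into. It only touches keyword arguments already used in make_instance();
# whether further helpers (e.g. to_dict()) exist depends on the version of the
# generated client, so nothing beyond plain attribute access is assumed.
class TestV1beta1ExplainerExtensionSpecFields(unittest.TestCase):
    def test_optional_fields_are_stored(self):
        spec = V1beta1ExplainerExtensionSpec(
            image='0', runtime_version='0', storage_uri='0')
        self.assertEqual(spec.image, '0')
        self.assertEqual(spec.runtime_version, '0')
        self.assertEqual(spec.storage_uri, '0')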
if __name__ == '__main__':
unittest.main()
the-stack_106_29169
# Natural Language Toolkit: Table widget
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Tkinter widgets for displaying multi-column listboxes and tables.
"""
from __future__ import division
import nltk.compat
import operator
from tkinter import (Frame, Label, Listbox, Scrollbar, Tk)
######################################################################
# Multi-Column Listbox
######################################################################
class MultiListbox(Frame):
"""
A multi-column listbox, where the current selection applies to an
entire row. Based on the MultiListbox Tkinter widget
recipe from the Python Cookbook (http://code.activestate.com/recipes/52266/)
For the most part, ``MultiListbox`` methods delegate to its
contained listboxes. For any methods that do not have docstrings,
see ``Tkinter.Listbox`` for a description of what that method does.
"""
#/////////////////////////////////////////////////////////////////
# Configuration
#/////////////////////////////////////////////////////////////////
#: Default configuration values for the frame.
FRAME_CONFIG = dict(background='#888',
takefocus=True,
highlightthickness=1)
#: Default configurations for the column labels.
LABEL_CONFIG = dict(borderwidth=1, relief='raised',
font='helvetica -16 bold',
background='#444', foreground='white')
#: Default configuration for the column listboxes.
LISTBOX_CONFIG = dict(borderwidth=1,
selectborderwidth=0,
highlightthickness=0,
exportselection=False,
selectbackground='#888',
activestyle='none',
takefocus=False)
#/////////////////////////////////////////////////////////////////
# Constructor
#/////////////////////////////////////////////////////////////////
def __init__(self, master, columns, column_weights=None, cnf={}, **kw):
"""
Construct a new multi-column listbox widget.
:param master: The widget that should contain the new
multi-column listbox.
:param columns: Specifies what columns should be included in
the new multi-column listbox. If ``columns`` is an integer,
then it is the number of columns to include. If it is
a list, then its length indicates the number of columns
to include; and each element of the list will be used as
a label for the corresponding column.
:param cnf, kw: Configuration parameters for this widget.
Use ``label_*`` to configure all labels; and ``listbox_*``
to configure all listboxes. E.g.:
>>> mlb = MultiListbox(master, 5, label_foreground='red')
"""
# If columns was specified as an int, convert it to a list.
if isinstance(columns, int):
columns = list(range(columns))
include_labels = False
else:
include_labels = True
if len(columns) == 0:
raise ValueError("Expected at least one column")
# Instance variables
self._column_names = tuple(columns)
self._listboxes = []
self._labels = []
# Pick a default value for column_weights, if none was specified.
if column_weights is None:
column_weights = [1] * len(columns)
elif len(column_weights) != len(columns):
raise ValueError('Expected one column_weight for each column')
self._column_weights = column_weights
# Configure our widgets.
Frame.__init__(self, master, **self.FRAME_CONFIG)
self.grid_rowconfigure(1, weight=1)
for i, label in enumerate(self._column_names):
self.grid_columnconfigure(i, weight=column_weights[i])
# Create a label for the column
if include_labels:
l = Label(self, text=label, **self.LABEL_CONFIG)
self._labels.append(l)
l.grid(column=i, row=0, sticky='news', padx=0, pady=0)
l.column_index = i
# Create a listbox for the column
lb = Listbox(self, **self.LISTBOX_CONFIG)
self._listboxes.append(lb)
lb.grid(column=i, row=1, sticky='news', padx=0, pady=0)
lb.column_index = i
# Clicking or dragging selects:
lb.bind('<Button-1>', self._select)
lb.bind('<B1-Motion>', self._select)
# Scroll wheel scrolls:
lb.bind('<Button-4>', lambda e: self._scroll(-1))
lb.bind('<Button-5>', lambda e: self._scroll(+1))
lb.bind('<MouseWheel>', lambda e: self._scroll(e.delta))
# Button 2 can be used to scan:
lb.bind('<Button-2>', lambda e: self.scan_mark(e.x, e.y))
lb.bind('<B2-Motion>', lambda e: self.scan_dragto(e.x, e.y))
# Dragging outside the window has no effect (disable
# the default listbox behavior, which scrolls):
lb.bind('<B1-Leave>', lambda e: 'break')
# Columns can be resized by dragging them:
l.bind('<Button-1>', self._resize_column)
# Columns can be resized by dragging them. (This binding is
# used if they click on the grid between columns:)
self.bind('<Button-1>', self._resize_column)
# Set up key bindings for the widget:
self.bind('<Up>', lambda e: self.select(delta=-1))
self.bind('<Down>', lambda e: self.select(delta=1))
self.bind('<Prior>', lambda e: self.select(delta=-self._pagesize()))
self.bind('<Next>', lambda e: self.select(delta=self._pagesize()))
# Configuration customizations
self.configure(cnf, **kw)
#/////////////////////////////////////////////////////////////////
# Column Resizing
#/////////////////////////////////////////////////////////////////
def _resize_column(self, event):
"""
Callback used to resize a column of the table. Return ``True``
if the column is actually getting resized (if the user clicked
on the far left or far right 5 pixels of a label); and
``False`` otherwise.
"""
# If we're already waiting for a button release, then ignore
# the new button press.
if event.widget.bind('<ButtonRelease>'):
return False
# Decide which column (if any) to resize.
self._resize_column_index = None
if event.widget is self:
for i, lb in enumerate(self._listboxes):
if abs(event.x-(lb.winfo_x()+lb.winfo_width())) < 10:
self._resize_column_index = i
elif event.x > (event.widget.winfo_width()-5):
self._resize_column_index = event.widget.column_index
elif event.x < 5 and event.widget.column_index != 0:
self._resize_column_index = event.widget.column_index-1
# Bind callbacks that are used to resize it.
if self._resize_column_index is not None:
event.widget.bind('<Motion>', self._resize_column_motion_cb)
event.widget.bind('<ButtonRelease-%d>' % event.num,
self._resize_column_buttonrelease_cb)
return True
else:
return False
def _resize_column_motion_cb(self, event):
lb = self._listboxes[self._resize_column_index]
charwidth = lb.winfo_width() / lb['width']
x1 = event.x + event.widget.winfo_x()
x2 = lb.winfo_x() + lb.winfo_width()
lb['width'] = max(3, lb['width'] + (x1-x2) // charwidth)
def _resize_column_buttonrelease_cb(self, event):
event.widget.unbind('<ButtonRelease-%d>' % event.num)
event.widget.unbind('<Motion>')
#/////////////////////////////////////////////////////////////////
# Properties
#/////////////////////////////////////////////////////////////////
@property
def column_names(self):
"""
A tuple containing the names of the columns used by this
multi-column listbox.
"""
return self._column_names
@property
def column_labels(self):
"""
A tuple containing the ``Tkinter.Label`` widgets used to
display the label of each column. If this multi-column
listbox was created without labels, then this will be an empty
tuple. These widgets will all be augmented with a
``column_index`` attribute, which can be used to determine
which column they correspond to. This can be convenient,
e.g., when defining callbacks for bound events.
"""
return tuple(self._labels)
@property
def listboxes(self):
"""
A tuple containing the ``Tkinter.Listbox`` widgets used to
display individual columns. These widgets will all be
augmented with a ``column_index`` attribute, which can be used
to determine which column they correspond to. This can be
convenient, e.g., when defining callbacks for bound events.
"""
return tuple(self._listboxes)
#/////////////////////////////////////////////////////////////////
# Mouse & Keyboard Callback Functions
#/////////////////////////////////////////////////////////////////
def _select(self, e):
i = e.widget.nearest(e.y)
self.selection_clear(0, 'end')
self.selection_set(i)
self.activate(i)
self.focus()
def _scroll(self, delta):
for lb in self._listboxes:
lb.yview_scroll(delta, 'unit')
return 'break'
def _pagesize(self):
""":return: The number of rows that makes up one page"""
return int(self.index('@0,1000000')) - int(self.index('@0,0'))
#/////////////////////////////////////////////////////////////////
# Row selection
#/////////////////////////////////////////////////////////////////
def select(self, index=None, delta=None, see=True):
"""
Set the selected row. If ``index`` is specified, then select
row ``index``. Otherwise, if ``delta`` is specified, then move
the current selection by ``delta`` (negative numbers for up,
positive numbers for down). This will not move the selection
past the top or the bottom of the list.
:param see: If true, then call ``self.see()`` with the newly
selected index, to ensure that it is visible.
"""
if (index is not None) and (delta is not None):
raise ValueError('specify index or delta, but not both')
# If delta was given, then calculate index.
if delta is not None:
if len(self.curselection()) == 0:
index = -1 + delta
else:
index = int(self.curselection()[0]) + delta
# Clear all selected rows.
self.selection_clear(0, 'end')
# Select the specified index
if index is not None:
index = min(max(index, 0), self.size()-1)
#self.activate(index)
self.selection_set(index)
if see: self.see(index)
#/////////////////////////////////////////////////////////////////
# Configuration
#/////////////////////////////////////////////////////////////////
def configure(self, cnf={}, **kw):
"""
Configure this widget. Use ``label_*`` to configure all
labels; and ``listbox_*`` to configure all listboxes. E.g.:
>>> mlb = MultiListbox(master, 5)
>>> mlb.configure(label_foreground='red')
>>> mlb.configure(listbox_foreground='red')
"""
cnf = dict(list(cnf.items()) + list(kw.items()))
for (key, val) in list(cnf.items()):
if key.startswith('label_') or key.startswith('label-'):
for label in self._labels:
label.configure({key[6:]: val})
elif key.startswith('listbox_') or key.startswith('listbox-'):
for listbox in self._listboxes:
listbox.configure({key[8:]: val})
else:
Frame.configure(self, {key:val})
def __setitem__(self, key, val):
"""
Configure this widget. This is equivalent to
``self.configure({key: val})``. See ``configure()``.
"""
self.configure({key:val})
def rowconfigure(self, row_index, cnf={}, **kw):
"""
Configure all table cells in the given row. Valid keyword
arguments are: ``background``, ``bg``, ``foreground``, ``fg``,
``selectbackground``, ``selectforeground``.
"""
for lb in self._listboxes: lb.itemconfigure(row_index, cnf, **kw)
def columnconfigure(self, col_index, cnf={}, **kw):
"""
Configure all table cells in the given column. Valid keyword
arguments are: ``background``, ``bg``, ``foreground``, ``fg``,
``selectbackground``, ``selectforeground``.
"""
lb = self._listboxes[col_index]
cnf = dict(list(cnf.items()) + list(kw.items()))
for (key, val) in list(cnf.items()):
if key in ('background', 'bg', 'foreground', 'fg',
'selectbackground', 'selectforeground'):
for i in range(lb.size()): lb.itemconfigure(i, {key:val})
else:
lb.configure({key:val})
def itemconfigure(self, row_index, col_index, cnf=None, **kw):
"""
Configure the table cell at the given row and column. Valid
keyword arguments are: ``background``, ``bg``, ``foreground``,
``fg``, ``selectbackground``, ``selectforeground``.
"""
lb = self._listboxes[col_index]
return lb.itemconfigure(row_index, cnf, **kw)
#/////////////////////////////////////////////////////////////////
# Value Access
#/////////////////////////////////////////////////////////////////
def insert(self, index, *rows):
"""
Insert the given row or rows into the table, at the given
index. Each row value should be a tuple of cell values, one
for each column in the row. Index may be an integer or any of
the special strings (such as ``'end'``) accepted by
``Tkinter.Listbox``.
"""
for elt in rows:
if len(elt) != len(self._column_names):
raise ValueError('rows should be tuples whose length '
'is equal to the number of columns')
for (lb,elts) in zip(self._listboxes, list(zip(*rows))):
lb.insert(index, *elts)
def get(self, first, last=None):
"""
Return the value(s) of the specified row(s). If ``last`` is
not specified, then return a single row value; otherwise,
return a list of row values. Each row value is a tuple of
cell values, one for each column in the row.
"""
values = [lb.get(first, last) for lb in self._listboxes]
if last:
return [tuple(row) for row in zip(*values)]
else:
return tuple(values)
def bbox(self, row, col):
"""
Return the bounding box for the given table cell, relative to
this widget's top-left corner. The bounding box is a tuple
of integers ``(left, top, width, height)``.
"""
dx, dy, _, _ = self.grid_bbox(row=0, column=col)
x, y, w, h = self._listboxes[col].bbox(row)
return int(x)+int(dx), int(y)+int(dy), int(w), int(h)
#/////////////////////////////////////////////////////////////////
# Hide/Show Columns
#/////////////////////////////////////////////////////////////////
def hide_column(self, col_index):
"""
Hide the given column. The column's state is still
maintained: its values will still be returned by ``get()``, and
you must supply its values when calling ``insert()``. It is
safe to call this on a column that is already hidden.
:see: ``show_column()``
"""
if self._labels:
self._labels[col_index].grid_forget()
self.listboxes[col_index].grid_forget()
self.grid_columnconfigure(col_index, weight=0)
def show_column(self, col_index):
"""
Display a column that has been hidden using ``hide_column()``.
It is safe to call this on a column that is not hidden.
"""
weight = self._column_weights[col_index]
if self._labels:
self._labels[col_index].grid(column=col_index, row=0,
sticky='news', padx=0, pady=0)
self._listboxes[col_index].grid(column=col_index, row=1,
sticky='news', padx=0, pady=0)
self.grid_columnconfigure(col_index, weight=weight)
#/////////////////////////////////////////////////////////////////
# Binding Methods
#/////////////////////////////////////////////////////////////////
def bind_to_labels(self, sequence=None, func=None, add=None):
"""
Add a binding to each ``Tkinter.Label`` widget in this
multi-column listbox that will call ``func`` in response to the
event sequence.
:return: A list of the identifiers of replaced binding
functions (if any), allowing for their deletion (to
prevent a memory leak).
"""
return [label.bind(sequence, func, add)
for label in self.column_labels]
def bind_to_listboxes(self, sequence=None, func=None, add=None):
"""
Add a binding to each ``Tkinter.Listbox`` widget in this
multi-column listbox that will call ``func`` in response to the
event sequence.
:return: A list of the identifiers of replaced binding
functions (if any), allowing for their deletion (to
prevent a memory leak).
"""
return [listbox.bind(sequence, func, add)
for listbox in self.listboxes]
def bind_to_columns(self, sequence=None, func=None, add=None):
"""
Add a binding to each ``Tkinter.Label`` and ``Tkinter.Listbox``
widget in this multi-column listbox that will call ``func`` in
response to the event sequence.
:return: A list of the identifiers of replaced binding
functions (if any), allowing for their deletion (to
prevent a memory leak).
"""
return (self.bind_to_labels(sequence, func, add) +
self.bind_to_listboxes(sequence, func, add))
#/////////////////////////////////////////////////////////////////
# Simple Delegation
#/////////////////////////////////////////////////////////////////
# These methods delegate to the first listbox:
def curselection(self, *args, **kwargs):
return self._listboxes[0].curselection(*args, **kwargs)
def selection_includes(self, *args, **kwargs):
return self._listboxes[0].selection_includes(*args, **kwargs)
def itemcget(self, *args, **kwargs):
return self._listboxes[0].itemcget(*args, **kwargs)
def size(self, *args, **kwargs):
return self._listboxes[0].size(*args, **kwargs)
def index(self, *args, **kwargs):
return self._listboxes[0].index(*args, **kwargs)
def nearest(self, *args, **kwargs):
return self._listboxes[0].nearest(*args, **kwargs)
# These methods delegate to each listbox (and return None):
def activate(self, *args, **kwargs):
for lb in self._listboxes: lb.activate(*args, **kwargs)
def delete(self, *args, **kwargs):
for lb in self._listboxes: lb.delete(*args, **kwargs)
def scan_mark(self, *args, **kwargs):
for lb in self._listboxes: lb.scan_mark(*args, **kwargs)
def scan_dragto(self, *args, **kwargs):
for lb in self._listboxes: lb.scan_dragto(*args, **kwargs)
def see(self, *args, **kwargs):
for lb in self._listboxes: lb.see(*args, **kwargs)
def selection_anchor(self, *args, **kwargs):
for lb in self._listboxes: lb.selection_anchor(*args, **kwargs)
def selection_clear(self, *args, **kwargs):
for lb in self._listboxes: lb.selection_clear(*args, **kwargs)
def selection_set(self, *args, **kwargs):
for lb in self._listboxes: lb.selection_set(*args, **kwargs)
def yview(self, *args, **kwargs):
for lb in self._listboxes: v = lb.yview(*args, **kwargs)
return v # if called with no arguments
def yview_moveto(self, *args, **kwargs):
for lb in self._listboxes: lb.yview_moveto(*args, **kwargs)
def yview_scroll(self, *args, **kwargs):
for lb in self._listboxes: lb.yview_scroll(*args, **kwargs)
#/////////////////////////////////////////////////////////////////
# Aliases
#/////////////////////////////////////////////////////////////////
itemconfig = itemconfigure
rowconfig = rowconfigure
columnconfig = columnconfigure
select_anchor = selection_anchor
select_clear = selection_clear
select_includes = selection_includes
select_set = selection_set
#/////////////////////////////////////////////////////////////////
# These listbox methods are not defined for multi-listbox
#/////////////////////////////////////////////////////////////////
# def xview(self, *what): pass
# def xview_moveto(self, fraction): pass
# def xview_scroll(self, number, what): pass
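# --- Editor's note (not part of the NLTK source) ---
# A minimal usage sketch for MultiListbox, using only methods defined above
# (insert, rowconfig, bind_to_labels). It needs a display to run, since the
# widget requires a Tk root window.
def _multilistbox_sketch():
    root = Tk()
    mlb = MultiListbox(root, ['Word', 'Count'], column_weights=[2, 1])
    mlb.pack(expand=True, fill='both')
    mlb.insert('end', ('apple', '3'), ('banana', '5'))
    mlb.rowconfig(0, background='#eef')   # colour an entire row
    mlb.bind_to_labels(                   # react to right-clicks on column labels
        '<Button-3>', lambda e: print('clicked column', e.widget.column_index))
    root.mainloop()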
######################################################################
# Table
######################################################################
class Table(object):
"""
A display widget for a table of values, based on a ``MultiListbox``
widget. For many purposes, ``Table`` can be treated as a
list-of-lists. E.g., table[i] is a list of the values for row i;
and table.append(row) adds a new row with the given list of
values. Individual cells can be accessed using table[i,j], which
refers to the j-th column of the i-th row. This can be used to
both read and write values from the table. E.g.:
>>> table[i,j] = 'hello'
The column (j) can be given either as an index number, or as a
column name. E.g., the following prints the value in the 3rd row
for the 'First Name' column:
>>> print(table[3, 'First Name'])
John
You can configure the colors for individual rows, columns, or
cells using ``rowconfig()``, ``columnconfig()``, and ``itemconfig()``.
The color configuration for each row will be preserved if the
table is modified; however, when new rows are added, any color
configurations that have been made for *columns* will not be
applied to the new row.
Note: Although ``Table`` acts like a widget in some ways (e.g., it
defines ``grid()``, ``pack()``, and ``bind()``), it is not itself a
widget; it just contains one. This is because widgets need to
define ``__getitem__()``, ``__setitem__()``, and ``__nonzero__()`` in
a way that's incompatible with the fact that ``Table`` behaves as a
list-of-lists.
:ivar _mlb: The multi-column listbox used to display this table's data.
:ivar _rows: A list-of-lists used to hold the cell values of this
table. Each element of _rows is a row value, i.e., a list of
cell values, one for each column in the row.
"""
def __init__(self, master, column_names, rows=None,
column_weights=None,
scrollbar=True, click_to_sort=True,
reprfunc=None, cnf={}, **kw):
"""
Construct a new Table widget.
:type master: Tkinter.Widget
:param master: The widget that should contain the new table.
:type column_names: list(str)
:param column_names: A list of names for the columns; these
names will be used to create labels for each column;
and can be used as an index when reading or writing
cell values from the table.
:type rows: list(list)
:param rows: A list of row values used to initialize the table.
Each row value should be a tuple of cell values, one for
each column in the row.
:type scrollbar: bool
:param scrollbar: If true, then create a scrollbar for the
new table widget.
:type click_to_sort: bool
:param click_to_sort: If true, then create bindings that will
sort the table's rows by a given column's values if the
user clicks on that column's label.
:type reprfunc: function
:param reprfunc: If specified, then use this function to
convert each table cell value to a string suitable for
display. ``reprfunc`` has the following signature:
reprfunc(row_index, col_index, cell_value) -> str
(Note that the column is specified by index, not by name.)
:param cnf, kw: Configuration parameters for this widget's
contained ``MultiListbox``. See ``MultiListbox.__init__()``
for details.
"""
self._num_columns = len(column_names)
self._reprfunc = reprfunc
self._frame = Frame(master)
self._column_name_to_index = dict((c,i) for (i,c) in
enumerate(column_names))
# Make a copy of the rows & check that it's valid.
if rows is None: self._rows = []
else: self._rows = [[v for v in row] for row in rows]
for row in self._rows: self._checkrow(row)
# Create our multi-list box.
self._mlb = MultiListbox(self._frame, column_names,
column_weights, cnf, **kw)
self._mlb.pack(side='left', expand=True, fill='both')
# Optional scrollbar
if scrollbar:
sb = Scrollbar(self._frame, orient='vertical',
command=self._mlb.yview)
self._mlb.listboxes[0]['yscrollcommand'] = sb.set
#for listbox in self._mlb.listboxes:
# listbox['yscrollcommand'] = sb.set
sb.pack(side='right', fill='y')
self._scrollbar = sb
# Set up sorting
self._sortkey = None
if click_to_sort:
for i, l in enumerate(self._mlb.column_labels):
l.bind('<Button-1>', self._sort)
# Fill in our multi-list box.
self._fill_table()
#/////////////////////////////////////////////////////////////////
#{ Widget-like Methods
#/////////////////////////////////////////////////////////////////
# These all just delegate to either our frame or our MLB.
def pack(self, *args, **kwargs):
"""Position this table's main frame widget in its parent
widget. See ``Tkinter.Frame.pack()`` for more info."""
self._frame.pack(*args, **kwargs)
def grid(self, *args, **kwargs):
"""Position this table's main frame widget in its parent
widget. See ``Tkinter.Frame.grid()`` for more info."""
self._frame.grid(*args, **kwargs)
def focus(self):
"""Direct (keyboard) input foxus to this widget."""
self._mlb.focus()
def bind(self, sequence=None, func=None, add=None):
"""Add a binding to this table's main frame that will call
``func`` in response to the event sequence."""
self._mlb.bind(sequence, func, add)
def rowconfigure(self, row_index, cnf={}, **kw):
""":see: ``MultiListbox.rowconfigure()``"""
self._mlb.rowconfigure(row_index, cnf, **kw)
def columnconfigure(self, col_index, cnf={}, **kw):
""":see: ``MultiListbox.columnconfigure()``"""
col_index = self.column_index(col_index)
self._mlb.columnconfigure(col_index, cnf, **kw)
def itemconfigure(self, row_index, col_index, cnf=None, **kw):
""":see: ``MultiListbox.itemconfigure()``"""
col_index = self.column_index(col_index)
return self._mlb.itemconfigure(row_index, col_index, cnf, **kw)
def bind_to_labels(self, sequence=None, func=None, add=None):
""":see: ``MultiListbox.bind_to_labels()``"""
return self._mlb.bind_to_labels(sequence, func, add)
def bind_to_listboxes(self, sequence=None, func=None, add=None):
""":see: ``MultiListbox.bind_to_listboxes()``"""
return self._mlb.bind_to_listboxes(sequence, func, add)
def bind_to_columns(self, sequence=None, func=None, add=None):
""":see: ``MultiListbox.bind_to_columns()``"""
return self._mlb.bind_to_columns(sequence, func, add)
rowconfig = rowconfigure
columnconfig = columnconfigure
itemconfig = itemconfigure
#/////////////////////////////////////////////////////////////////
#{ Table as list-of-lists
#/////////////////////////////////////////////////////////////////
def insert(self, row_index, rowvalue):
"""
Insert a new row into the table, so that its row index will be
``row_index``. If the table contains any rows whose row index
is greater than or equal to ``row_index``, then they will be
shifted down.
:param rowvalue: A tuple of cell values, one for each column
in the new row.
"""
self._checkrow(rowvalue)
self._rows.insert(row_index, rowvalue)
if self._reprfunc is not None:
rowvalue = [self._reprfunc(row_index,j,v)
for (j,v) in enumerate(rowvalue)]
self._mlb.insert(row_index, rowvalue)
if self._DEBUG: self._check_table_vs_mlb()
def extend(self, rowvalues):
"""
Add new rows at the end of the table.
:param rowvalues: A list of row values used to initialize the
table. Each row value should be a tuple of cell values,
one for each column in the row.
"""
for rowvalue in rowvalues: self.append(rowvalue)
if self._DEBUG: self._check_table_vs_mlb()
def append(self, rowvalue):
"""
Add a new row to the end of the table.
:param rowvalue: A tuple of cell values, one for each column
in the new row.
"""
self.insert(len(self._rows), rowvalue)
if self._DEBUG: self._check_table_vs_mlb()
def clear(self):
"""
Delete all rows in this table.
"""
self._rows = []
self._mlb.delete(0, 'end')
if self._DEBUG: self._check_table_vs_mlb()
def __getitem__(self, index):
"""
Return the value of a row or a cell in this table. If
``index`` is an integer, then return the row value for the ``index``th
row. This row value consists of a tuple of cell values, one
for each column in the row. If ``index`` is a tuple of two
integers, ``(i,j)``, then return the value of the cell in the
``i``th row and the ``j``th column.
"""
if isinstance(index, slice):
raise ValueError('Slicing not supported')
elif isinstance(index, tuple) and len(index)==2:
return self._rows[index[0]][self.column_index(index[1])]
else:
return tuple(self._rows[index])
def __setitem__(self, index, val):
"""
Replace the value of a row or a cell in this table with
``val``.
If ``index`` is an integer, then ``val`` should be a row value
(i.e., a tuple of cell values, one for each column). In this
case, the values of the ``index``th row of the table will be
replaced with the values in ``val``.
If ``index`` is a tuple of integers, ``(i,j)``, then replace the
value of the cell in the ``i``th row and ``j``th column with
``val``.
"""
if isinstance(index, slice):
raise ValueError('Slicing not supported')
# table[i,j] = val
elif isinstance(index, tuple) and len(index)==2:
i, j = index[0], self.column_index(index[1])
config_cookie = self._save_config_info([i])
self._rows[i][j] = val
if self._reprfunc is not None:
val = self._reprfunc(i, j, val)
self._mlb.listboxes[j].insert(i, val)
self._mlb.listboxes[j].delete(i+1)
self._restore_config_info(config_cookie)
# table[i] = val
else:
config_cookie = self._save_config_info([index])
self._checkrow(val)
self._rows[index] = list(val)
if self._reprfunc is not None:
val = [self._reprfunc(index,j,v) for (j,v) in enumerate(val)]
self._mlb.insert(index, val)
self._mlb.delete(index+1)
self._restore_config_info(config_cookie)
def __delitem__(self, row_index):
"""
Delete the ``row_index``th row from this table.
"""
if isinstance(row_index, slice):
raise ValueError('Slicing not supported')
if isinstance(row_index, tuple) and len(row_index)==2:
raise ValueError('Cannot delete a single cell!')
del self._rows[row_index]
self._mlb.delete(row_index)
if self._DEBUG: self._check_table_vs_mlb()
def __len__(self):
"""
:return: the number of rows in this table.
"""
return len(self._rows)
def _checkrow(self, rowvalue):
"""
Helper function: check that a given row value has the correct
number of elements; and if not, raise an exception.
"""
if len(rowvalue) != self._num_columns:
raise ValueError('Row %r has %d columns; expected %d' %
(rowvalue, len(rowvalue), self._num_columns))
#/////////////////////////////////////////////////////////////////
# Columns
#/////////////////////////////////////////////////////////////////
@property
def column_names(self):
"""A list of the names of the columns in this table."""
return self._mlb.column_names
def column_index(self, i):
"""
If ``i`` is a valid column index integer, then return it as is.
Otherwise, check if ``i`` is used as the name for any column;
if so, return that column's index. Otherwise, raise a
``KeyError`` exception.
"""
if isinstance(i, int) and 0 <= i < self._num_columns:
return i
else:
# This raises a key error if the column is not found.
return self._column_name_to_index[i]
def hide_column(self, column_index):
""":see: ``MultiListbox.hide_column()``"""
self._mlb.hide_column(self.column_index(column_index))
def show_column(self, column_index):
""":see: ``MultiListbox.show_column()``"""
self._mlb.show_column(self.column_index(column_index))
#/////////////////////////////////////////////////////////////////
# Selection
#/////////////////////////////////////////////////////////////////
def selected_row(self):
"""
Return the index of the currently selected row, or None if
no row is selected. To get the row value itself, use
``table[table.selected_row()]``.
"""
sel = self._mlb.curselection()
if sel: return int(sel[0])
else: return None
def select(self, index=None, delta=None, see=True):
""":see: ``MultiListbox.select()``"""
self._mlb.select(index, delta, see)
#/////////////////////////////////////////////////////////////////
# Sorting
#/////////////////////////////////////////////////////////////////
def sort_by(self, column_index, order='toggle'):
"""
Sort the rows in this table, using the specified column's
values as a sort key.
:param column_index: Specifies which column to sort, using
either a column index (int) or a column's label name
(str).
:param order: Specifies whether to sort the values in
ascending or descending order:
- ``'ascending'``: Sort from least to greatest.
- ``'descending'``: Sort from greatest to least.
- ``'toggle'``: If the most recent call to ``sort_by()``
sorted the table by the same column (``column_index``),
then reverse the rows; otherwise sort in ascending
order.
"""
if order not in ('ascending', 'descending', 'toggle'):
raise ValueError('sort_by(): order should be "ascending", '
'"descending", or "toggle".')
column_index = self.column_index(column_index)
config_cookie = self._save_config_info(index_by_id=True)
# Sort the rows.
if order == 'toggle' and column_index == self._sortkey:
self._rows.reverse()
else:
self._rows.sort(key=operator.itemgetter(column_index),
reverse=(order=='descending'))
self._sortkey = column_index
# Redraw the table.
self._fill_table()
self._restore_config_info(config_cookie, index_by_id=True, see=True)
if self._DEBUG: self._check_table_vs_mlb()
def _sort(self, event):
"""Event handler for clicking on a column label -- sort by
that column."""
column_index = event.widget.column_index
# If they click on the far-left or far-right of a column's
# label, then resize rather than sorting.
if self._mlb._resize_column(event):
return 'continue'
# Otherwise, sort.
else:
self.sort_by(column_index)
return 'continue'
#/////////////////////////////////////////////////////////////////
#{ Table Drawing Helpers
#/////////////////////////////////////////////////////////////////
def _fill_table(self, save_config=True):
"""
Re-draw the table from scratch, by clearing out the table's
multi-column listbox; and then filling it in with values from
``self._rows``. Note that any cell-, row-, or column-specific
color configuration that has been done will be lost. The
selection will also be lost -- i.e., no row will be selected
after this call completes.
"""
self._mlb.delete(0, 'end')
for i, row in enumerate(self._rows):
if self._reprfunc is not None:
row = [self._reprfunc(i,j,v) for (j,v) in enumerate(row)]
self._mlb.insert('end', row)
def _get_itemconfig(self, r, c):
return dict( (k, self._mlb.itemconfig(r, c, k)[-1])
for k in ('foreground', 'selectforeground',
'background', 'selectbackground') )
def _save_config_info(self, row_indices=None, index_by_id=False):
"""
Return a 'cookie' containing information about which row is
selected, and what color configurations have been applied.
This information can then be re-applied to the table (after
making modifications) using ``_restore_config_info()``. Color
configuration information will be saved for any rows in
``row_indices``, or in the entire table, if
``row_indices=None``. If ``index_by_id=True``, then the cookie
will associate rows with their configuration information based
on the rows' python id. This is useful when performing
operations that re-arrange the rows (e.g. ``sort``). If
``index_by_id=False``, then it is assumed that all rows will be
in the same order when ``_restore_config_info()`` is called.
"""
# Default value for row_indices is all rows.
if row_indices is None:
row_indices = list(range(len(self._rows)))
# Look up our current selection.
selection = self.selected_row()
if index_by_id and selection is not None:
selection = id(self._rows[selection])
# Look up the color configuration info for each row.
if index_by_id:
config = dict((id(self._rows[r]), [self._get_itemconfig(r, c)
for c in range(self._num_columns)])
for r in row_indices)
else:
config = dict((r, [self._get_itemconfig(r, c)
for c in range(self._num_columns)])
for r in row_indices)
return selection, config
def _restore_config_info(self, cookie, index_by_id=False, see=False):
"""
Restore selection & color configuration information that was
saved using ``_save_config_info``.
"""
selection, config = cookie
# Clear the selection.
if selection is None:
self._mlb.selection_clear(0, 'end')
# Restore selection & color config
if index_by_id:
for r, row in enumerate(self._rows):
if id(row) in config:
for c in range(self._num_columns):
self._mlb.itemconfigure(r, c, config[id(row)][c])
if id(row) == selection:
self._mlb.select(r, see=see)
else:
if selection is not None:
self._mlb.select(selection, see=see)
for r in config:
for c in range(self._num_columns):
self._mlb.itemconfigure(r, c, config[r][c])
#/////////////////////////////////////////////////////////////////
# Debugging (Invariant Checker)
#/////////////////////////////////////////////////////////////////
_DEBUG = False
"""If true, then run ``_check_table_vs_mlb()`` after any operation
that modifies the table."""
def _check_table_vs_mlb(self):
"""
Verify that the contents of the table's ``_rows`` variable match
the contents of its multi-listbox (``_mlb``). This is just
included for debugging purposes, to make sure that the
list-modifying operations are working correctly.
"""
for col in self._mlb.listboxes:
assert len(self) == col.size()
for row in self:
assert len(row) == self._num_columns
assert self._num_columns == len(self._mlb.column_names)
#assert self._column_names == self._mlb.column_names
for i, row in enumerate(self):
for j, cell in enumerate(row):
if self._reprfunc is not None:
cell = self._reprfunc(i, j, cell)
assert self._mlb.get(i)[j] == cell
######################################################################
# Demo/Test Function
######################################################################
# update this to use new WordNet API
def demo():
root = Tk()
root.bind('<Control-q>', lambda e: root.destroy())
table = Table(root, 'Word Synset Hypernym Hyponym'.split(),
column_weights=[0, 1, 1, 1],
reprfunc=(lambda i,j,s: ' %s' % s))
table.pack(expand=True, fill='both')
from nltk.corpus import wordnet
from nltk.corpus import brown
for word, pos in sorted(set(brown.tagged_words()[:500])):
if pos[0] != 'N': continue
word = word.lower()
for synset in wordnet.synsets(word):
hyper = (synset.hypernyms()+[''])[0]
hypo = (synset.hyponyms()+[''])[0]
table.append([word,
getattr(synset, 'definition', '*none*'),
getattr(hyper, 'definition', '*none*'),
getattr(hypo, 'definition', '*none*')])
table.columnconfig('Word', background='#afa')
table.columnconfig('Synset', background='#efe')
table.columnconfig('Hypernym', background='#fee')
table.columnconfig('Hyponym', background='#ffe')
for row in range(len(table)):
for column in ('Hypernym', 'Hyponym'):
if table[row, column] == '*none*':
table.itemconfig(row, column, foreground='#666',
selectforeground='#666')
root.mainloop()
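# --- Editor's note (not part of the NLTK source) ---
# A smaller, hedged sketch of the Table wrapper above that avoids the WordNet
# corpus needed by demo(). It exercises the list-of-lists interface
# (append, indexing, sort_by, itemconfig) with made-up data.
def table_sketch():
    root = Tk()
    table = Table(root, ['Name', 'Score'],
                  rows=[['alice', 3], ['bob', 1]],
                  reprfunc=lambda i, j, v: ' %s' % v)
    table.pack(expand=True, fill='both')
    table.append(['carol', 7])
    table.sort_by('Score', order='descending')
    print(table[0])                                   # highest-scoring row as a tuple
    table.itemconfig(0, 'Score', foreground='#900')   # highlight the top score
    root.mainloop()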
if __name__ == '__main__':
demo()
the-stack_106_29170
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import torch.utils.model_zoo as model_zoo
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=3):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
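# Editor's note: only the first block of each stage applies the requested
# stride; the rest keep stride 1. For example, layer2 of resnet34
# (num_blocks=4, stride=2) uses strides == [2] + [1] * 3 == [2, 1, 1, 1].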
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2,2,2,2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [3,4,6,3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3,4,6,3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3,4,23,3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3,8,36,3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
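# --- Editor's note (not part of the original file) ---
# A minimal smoke test for the CIFAR-style ResNet above, using random weights.
# Note that this variant (3x3 conv1, no max-pool, 'linear'/'shortcut' parameter
# names) does not match the torchvision checkpoints listed in model_urls, so
# pretrained=True is expected to fail to load into this architecture.
if __name__ == '__main__':
    net = resnet18(num_classes=3)
    x = torch.randn(2, 3, 32, 32)      # CIFAR-sized input batch
    with torch.no_grad():
        logits = net(x)
    print(logits.shape)                # expected: torch.Size([2, 3])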
the-stack_106_29172
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sympy
import cirq
def test_product_duplicate_keys():
with pytest.raises(ValueError):
_ = cirq.Linspace('a', 0, 9, 10) * cirq.Linspace('a', 0, 10, 11)
def test_zip_duplicate_keys():
with pytest.raises(ValueError):
_ = cirq.Linspace('a', 0, 9, 10) + cirq.Linspace('a', 0, 10, 11)
def test_linspace():
sweep = cirq.Linspace('a', 0.34, 9.16, 7)
assert len(sweep) == 7
params = list(sweep.param_tuples())
assert len(params) == 7
assert params[0] == (('a', 0.34),)
assert params[-1] == (('a', 9.16),)
def test_linspace_one_point():
sweep = cirq.Linspace('a', 0.34, 9.16, 1)
assert len(sweep) == 1
params = list(sweep.param_tuples())
assert len(params) == 1
assert params[0] == (('a', 0.34),)
def test_linspace_sympy_symbol():
a = sympy.Symbol('a')
sweep = cirq.Linspace(a, 0.34, 9.16, 7)
assert len(sweep) == 7
params = list(sweep.param_tuples())
assert len(params) == 7
assert params[0] == (('a', 0.34),)
assert params[-1] == (('a', 9.16),)
def test_points():
sweep = cirq.Points('a', [1, 2, 3, 4])
assert len(sweep) == 4
params = list(sweep)
assert len(params) == 4
def test_zip():
sweep = cirq.Points('a', [1, 2, 3]) + cirq.Points('b', [4, 5, 6, 7])
assert len(sweep) == 3
assert _values(sweep, 'a') == [1, 2, 3]
assert _values(sweep, 'b') == [4, 5, 6]
def test_product():
sweep = cirq.Points('a', [1, 2, 3]) * cirq.Points('b', [4, 5, 6, 7])
assert len(sweep) == 12
assert _values(sweep, 'a') == [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
assert _values(sweep, 'b') == [4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7]
def _values(sweep, key):
p = sympy.Symbol(key)
return [resolver.value_of(p) for resolver in sweep]
def test_equality():
et = cirq.testing.EqualsTester()
et.add_equality_group(cirq.UnitSweep, cirq.UnitSweep)
# Simple sweeps with the same key are equal to themselves, but different
# from each other even if they happen to contain the same points.
et.make_equality_group(lambda: cirq.Linspace('a', 0, 10, 11))
et.make_equality_group(lambda: cirq.Linspace('b', 0, 10, 11))
et.make_equality_group(lambda: cirq.Points('a', list(range(11))))
et.make_equality_group(lambda: cirq.Points('b', list(range(11))))
# Product and Zip sweeps can also be equated.
et.make_equality_group(
lambda: cirq.Linspace('a', 0, 5, 6) * cirq.Linspace('b', 10, 15, 6))
et.make_equality_group(
lambda: cirq.Linspace('a', 0, 5, 6) + cirq.Linspace('b', 10, 15, 6))
et.make_equality_group(
lambda: cirq.Points('a', [1, 2]) *
(cirq.Linspace('b', 0, 5, 6) +
cirq.Linspace('c', 10, 15, 6)))
def test_repr():
assert repr(cirq.study.sweeps.Product(cirq.UnitSweep)) == \
'cirq.study.sweeps.Product(cirq.UnitSweep)'
assert repr(cirq.study.sweeps.Zip(cirq.UnitSweep)) == \
'cirq.study.sweeps.Zip(cirq.UnitSweep)'
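# --- Editor's note (not part of the original test file) ---
# A hedged sketch of how the sweeps under test compose, using only APIs already
# exercised above (Points, Linspace, '*' for Product, '+' for Zip,
# param_tuples()); exact behaviour may differ slightly across cirq versions.
def _sweep_composition_sketch():
    grid = cirq.Points('a', [0, 1]) * cirq.Linspace('b', 0.0, 1.0, 3)   # 2 * 3 points
    paired = cirq.Points('a', [0, 1]) + cirq.Points('b', [10, 20])      # zipped pairs
    assert len(grid) == 6
    assert len(paired) == 2
    for params in paired.param_tuples():
        print(dict(params))   # roughly {'a': 0, 'b': 10}, then {'a': 1, 'b': 20}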
the-stack_106_29173
import json  # https://docs.python.org/3/library/json.html
# https://django-extensions.readthedocs.io/en/latest/runscript.html
import os
from index.models import Game_Model, Image_Model
def run():
fileNumber = 7
while fileNumber <= 7:
with open(f'res/data_{fileNumber}.json', "r") as f:
data = json.load(f)
for i in data:
# print("-------------------------------------------------------------------------")
id, cover, genres, name, platforms, developers = None, None, None, None, None, None
if 'id' in i:
id = i['id']
if 'cover' in i:
cover = i['cover']
if 'genres' in i:
genres = i["genres"]
if 'name' in i:
name = i['name']
if 'platforms' in i:
platforms = i['platforms']
if 'involved_companies' in i:
developers = i['involved_companies']
print("Game Uploaded: ", name, "- Platforms:", platforms)
img, created = Image_Model.objects.get_or_create(img_id=id,img_url=cover)
game = Game_Model(game_id=int(id), game_title=name,genres={"genres":genres},developers={"developers":developers},platforms ={"platforms":platforms}, img_id=img)
game.save()
fileNumber+=1
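# --- Editor's note (not part of the original script) ---
# A hedged sketch of the same field extraction using dict.get(), which collapses
# the chain of membership checks above into one line per field. It also swaps
# Game_Model(...).save() for update_or_create() to avoid duplicate rows on
# re-runs; the model names are the ones imported at the top of this file.
def upsert_game(i):
    id, cover, name = i.get('id'), i.get('cover'), i.get('name')
    genres, platforms = i.get('genres'), i.get('platforms')
    developers = i.get('involved_companies')
    img, _created = Image_Model.objects.get_or_create(img_id=id, img_url=cover)
    Game_Model.objects.update_or_create(
        game_id=int(id),
        defaults=dict(game_title=name, genres={"genres": genres},
                      developers={"developers": developers},
                      platforms={"platforms": platforms}, img_id=img))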
the-stack_106_29175
# -*- coding: utf-8 -*-
import lxml.etree
import six
import zeit.content.article.article
import zeit.wochenmarkt.interfaces
import zeit.wochenmarkt.testing
import zope.component
class TestRecipeCategoriesWhitelist(
zeit.wochenmarkt.testing.FunctionalTestCase):
def test_category_should_be_found_through_xml(self):
categories = zope.component.getUtility(
zeit.wochenmarkt.interfaces.IRecipeCategoriesWhitelist)._load()
pizza = dict(categories.items()).get('pizza')
assert 'Pizza' == pizza.name
def test_category_should_be_found_by_id(self):
bowl = zope.component.getUtility(
zeit.wochenmarkt.interfaces.IRecipeCategoriesWhitelist).get('bowl')
assert 'Bowl' == bowl.name
def test_autocomplete_should_be_available_for_categories(self):
result = zope.component.getUtility(
zeit.wochenmarkt.interfaces.IRecipeCategoriesWhitelist).search('B')
assert 2 == len(result)
names = []
for item in result:
names.append(item.name)
assert u'Barbecue' in names
class TestRecipeCategories(
zeit.wochenmarkt.testing.FunctionalTestCase,
zeit.wochenmarkt.testing.RecipeCategoriesHelper):
def get_content(self):
from zeit.wochenmarkt.categories import RecipeCategories
from lxml import objectify
class Content(object):
categories = RecipeCategories()
xml = objectify.fromstring('<article><head/></article>')
return Content()
def test_set_should_add_new_categories(self):
categories = self.setup_categories('summer', 'pizza')
summer = categories['summer']
pizza = categories['pizza']
content = self.get_content()
content.categories = [summer, pizza]
result = content.categories
self.assertEqual(['summer', 'pizza'], [x.code for x in result])
def test_set_should_add_duplicate_values_only_once(self):
categories = self.setup_categories('summer')
summer = categories['summer']
content = self.get_content()
content.categories = [summer, summer]
result = content.categories
self.assertEqual(['summer'], [x.code for x in result])
def test_set_should_write_categories_to_xml_head(self):
categories = self.setup_categories('summer')
summer = categories['summer']
content = self.get_content()
content.categories = [summer]
self.assertEllipsis(
'<recipe_categories...><category code="summer"/>...',
lxml.etree.tostring(
content.xml.head.recipe_categories,
encoding=six.text_type))
def test_removing_all_categories_should_leave_no_trace(self):
categories = self.setup_categories('summer')
summer = categories['summer']
content = self.get_content()
content.categories = [summer]
self.assertEqual(1, len(content.xml.xpath('//recipe_categories')))
content.categories = []
self.assertEqual(0, len(content.xml.xpath('//recipe_categories')))
def test_unavailable_categories_should_just_be_skipped(self):
categories = self.setup_categories('servomotoren', 'pizza')
servomotoren = categories['servomotoren']
pizza = categories['pizza']
content = self.get_content()
content.categories = [servomotoren, pizza]
result = content.categories
self.assertEqual(['pizza'], [x.code for x in result])
the-stack_106_29179
import os
import time
import random
import ujson as json
from typing import List, Set
from collections import OrderedDict
from nltk.tokenize import word_tokenize
from multiprocessing import Manager
from nltk import corpus  # needed below for corpus.stopwords
stop_words = set(corpus.stopwords.words('english'))
random.seed(22)
base_url = 'https://en.wikipedia.org/?curid={}'
class DisjointSet:
def __init__(self, n, comp):
self.parent = [i for i in range(n)]
self.rank = [0 for _ in range(n)]
self.comp = comp
self.n = n
def run(self):
for i in range(self.n):
for j in range(i + 1, self.n):
if self.comp(i, j):
self.union(i, j)
def find(self, x):
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, x, y):
xroot = self.find(x)
yroot = self.find(y)
if xroot == yroot:
return
if self.rank[xroot] < self.rank[yroot]:
self.parent[xroot] = yroot
elif self.rank[xroot] > self.rank[yroot]:
self.parent[yroot] = xroot
else:
self.parent[yroot] = xroot
self.rank[xroot] += 1
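# --- Editor's note (not part of the original file) ---
# A small self-contained demonstration of the DisjointSet above: indices are
# grouped whenever their names share a token. Purely illustrative.
def _disjoint_set_sketch():
    names = ['Barack Obama', 'Obama', 'Michelle Obama', 'Angela Merkel']
    same = lambda i, j: bool(set(names[i].split()) & set(names[j].split()))
    ds = DisjointSet(len(names), same)
    ds.run()
    groups = {}
    for i in range(len(names)):
        groups.setdefault(ds.find(i), []).append(names[i])
    print(list(groups.values()))
    # -> [['Barack Obama', 'Obama', 'Michelle Obama'], ['Angela Merkel']]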
def acronym(phrase: str, stopwords: Set[str], ner=None):
cap = lambda x: x[0].upper() + x[1:]
def with_dot(l):
s = set()
for x in l:
s.update({x, x + '.'})
return s
time_name = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october',
'november', 'december', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
if phrase.lower() in time_name:
abbr = with_dot([cap(phrase[:3])])
if phrase.lower() == 'september':
abbr.update(with_dot(['Sept']))
elif phrase.lower() == 'tuesday':
abbr.update(with_dot(['Tu', 'Tues']))
elif phrase.lower() == 'thursday':
abbr.update(with_dot(['Thur', 'Thurs', 'Th']))
return abbr
abbr = {phrase}
words = word_tokenize(phrase)
stand_form = []
for w in words:
if w in stopwords:
stand_form.append(w.lower())
else:
stand_form.append(cap(w.lower()))
abbr.add(' '.join(stand_form))
words = [w for w in words if w not in stopwords]
# if len(words) == 1 or ner == 'PERSON':
# return abbr
# first_cap_letters = [w[0].upper() for w in words]
# abbr.add('.'.join(first_cap_letters))
# abbr.add(''.join(first_cap_letters))
# abbr.add('.'.join(first_cap_letters)+'.')
return abbr
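# Editor's note: quick illustration of acronym() above (requires the NLTK
# 'punkt' and 'stopwords' data loaded at module import time):
#   acronym('September', stop_words)     -> {'Sep', 'Sep.', 'Sept', 'Sept.'}
#   acronym('united states', stop_words) -> {'united states', 'United States'}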
# ------------------- parsing and linking section -------------------
def link(sents):
mentions = []
for i_s, s in enumerate(sents):
if s['text'] != '.':
for i_m, m in enumerate(s['mentions']):
mentions.append(Mention(m['start'], m['end'], s['text'], m['ner'], m['text'], i_s, i_m))
abbr = acronym(m['text'], stop_words, ner=m['ner'])
mentions[-1].alias = abbr
def get_entities(mention: Mention):
mset = mention.alias
for m in mset:
ment_info = share_src.ment_ent_dict.get(m)
if ment_info:
mention.total_num += ment_info['total']
for eid, freq_name in ment_info['entities'].items():
eid = int(eid)
freq = freq_name['freq']
name = freq_name['name']
if eid in mention.candidates:
mention.candidates[eid].freq = max(mention.candidates[eid].freq, freq)
else:
mention.candidates[eid] = Entity(eid, name, freq)
true_ent = share_src.redirects.get(m, None)
if true_ent:
eid, name = int(true_ent['id']), true_ent['name']
if eid in mention.candidates:
mention.candidates[eid].freq = 1.0
else:
mention.candidates[eid] = Entity(eid, name, 1.0)
cands = mention.candidates.items()
cands = sorted(cands, key=lambda x: x[1].freq, reverse=True)
mention.candidates = [c[1] for c in cands]
# only for names
def coref(mentions: List[Mention]):
mention_person = []
for m in mentions:
if m.ner == 'PERSON':
mention_person.append(m)
elif len(m.candidates) > 0:
highest_candidate: Entity = m.candidates[0]
if highest_candidate.name in share_src.persons:
mention_person.append(m)
# mention_person = sorted(mention_person,lambda x:len(x.text))
mention_num = len(mention_person)
def is_same_person(i1, i2):
if i1 == i2:
return True
m1, m2 = mention_person[i1].text, mention_person[i2].text
return str_contain(m1, m2) or str_contain(m2, m1)
dset = DisjointSet(mention_num, is_same_person)
dset.run()
# candidate implement
person_cand = {}
for k in set(dset.find(i) for i in range(mention_num)):
person_cand[k] = {}
for i_m, m in enumerate(mention_person):
label = dset.find(i_m)
for ent in m.candidates:
eid = ent.id
if eid in person_cand[label]:
person_cand[label][eid].update(ent)
else:
person_cand[label][eid] = ent
for i_m, m in enumerate(mention_person):
label = dset.find(i_m)
tmp = person_cand[label].items()
tmp = sorted(tmp, key=lambda x: x[1].freq, reverse=True)
m.candidates = [t[1] for t in tmp]
for m in mentions:
get_entities(m)
coref(mentions)
for m in mentions:
if m.candidates is None or len(m.candidates) <= 0:
continue
sent_id, ment_id = m.sent_id, m.ment_id
ment = sents[sent_id]['mentions'][ment_id]
cands = [m.candidates[0]]
# more than one candidate may have frequency = 1
if abs(cands[0].freq - 1.0) < 1e-4:
for i in range(1, len(m.candidates)):
if abs(m.candidates[i].freq - 1.0) < 1e-4:
if m.candidates[i].id not in share_src.disambiguation_id2name:
cands.append(m.candidates[i])
else:
pass
else:
break
if cands[0].id in share_src.disambiguation_id2name:
if len(cands) == 1:
continue
else:
cands = cands[1:]
rand_int = random.randrange(len(cands))
ment['link'] = base_url.format(cands[rand_int].id)
ment['entity'] = cands[rand_int].name
def write_big_dict(fn, dic, limit=20):
with open(fn, 'w') as fw:
item = {}
for k, v in dic.items():
item[k] = v
if len(item) == limit:
fw.write(json.dumps(item) + '\n')
item = {}
if item != {}:
fw.write(json.dumps(item))
def read_big_dict(fn):
dic = {}
for i_line, line in enumerate(open(fn)):
line = line.strip()
for k, v in json.loads(line).items():
item = {'total': v['total'], 'entities': OrderedDict()}
# v = {'total':10,'entities':{51:{'freq':,'name':}}
for eid, ent in v['entities'].items():
item['entities'][int(eid)] = ent
dic[k] = item
return dic
def write_ment_ent_dict(sources: List[str], stopwords):
ment_ent_dict = {}
def read_src(fn: str):
for i_l, line in enumerate(open(fn).readlines()):
line = line.strip()
items = line.split('\t')
mention = items[0]
if items[1].isdigit():
total_num = int(items[1])
else:
continue
if total_num >= 1:
ment_info = ment_ent_dict.get(mention, None)
if ment_info is None:
ment_ent_dict[mention] = {'total': total_num, 'entities': {}}
ment_info = ment_ent_dict[mention]
else:
ment_info['total'] += total_num
ent_num = len(items[2:])
for ent in items[2:]:
ent_items = ent.split(',')
if not ent_items[1].isdigit() or (ent_items[1].isdigit() and len(ent_items) == 2):
eid = int(ent_items[0])
name = ','.join(ent_items[1:])
num = total_num / ent_num
else:
if ent_items[0].isdigit() and ent_items[1].isdigit():
eid, num = int(ent_items[0]), int(ent_items[1])
else:
print('line:{} does not have right ents'.format(line))
continue
name = ','.join(ent_items[2:])
if eid in ment_info['entities']:
prev_freq = ment_info['entities'][eid]['freq']
ment_info['entities'][eid]['freq'] = min((1.0, prev_freq + num / total_num))
else:
ment_info['entities'][eid] = {'freq': num / total_num, 'name': name}
for src in sources:
read_src(src)
for ment, item in ment_ent_dict.items():
ents = list(item['entities'].items())
ents = sorted(ents, key=lambda x: x[1]['freq'], reverse=True)
ordered_ents = OrderedDict()
for eid, ent in ents:
ordered_ents[eid] = ent
item['entities'] = ordered_ents
start = time.time()
write_big_dict('/home/hkeaa/ment_ent.dict', ment_ent_dict)
print('write dict cost {:.2f}s'.format(time.time() - start))
class Mention:
def __init__(self, start, end, ctx, ner, text, sent_id, ment_id, link=None):
self.start = start
self.end = end
self.ctx = ctx
self.ner = ner
self.text = text
self.link = link
self.total_num = 0
self.alias = None
self.sent_id = sent_id
self.ment_id = ment_id
self.candidates = {}
def __str__(self):
return '{}_{}'.format(self.text, self.ner)
def __repr__(self):
return '{}_{}'.format(self.text, self.ner)
class Entity:
# wiki id, name, freq
def __init__(self, wid, name, freq):
self.id = wid
self.name = name
self.freq = freq
def __str__(self):
return '{}_{}'.format(self.name, self.id)
def __repr__(self):
return '{}_{}'.format(self.name, self.id)
def __hash__(self):
return self.id
def __eq__(self, other):
return self.id == other.id
def update(self, other):
self.freq = (self.freq + other.freq) / 2
def read_dict_from_dir(path):
dic = {}
for fn in os.listdir(path):
if fn.endswith('.dict'):
tmp = read_big_dict(os.path.join(path, fn))
dic = {**dic, **tmp}
print('\tread {} done'.format(fn))
# TODO remove break to read full dictionary
# break
return dic
def str_contain(m: str, n: str):
if m == n:
return True
start = m.find(n)
if start == -1:
return False
end = start + len(n) - 1
# print(m, n, start, end)
if (start == 0 or m[start - 1] == ' ') and (end == len(m) - 1 or m[end + 1] == ' '):
return True
return False
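# Illustrative note (not part of the original module): containment only counts
# when the shorter string is aligned on word boundaries, e.g.
#
#   str_contain('New York City', 'York')   # -> True
#   str_contain('New Yorker', 'York')      # -> False ('York' sits inside a word)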
class LinkSharedSource:
def __init__(self, disam_fn, redirect_fn, ment_ent_fn, person_fn):
# self.manager = manager
# process_safe = lambda x: self.manager.dict(x)
# disambiguation dict : id -- name
self.disambiguation_id2name, self.disambiguation_name2id = self.__measure_speed(disam_fn,
self.__read_disambiguation,
'disambiguation')
# self.disambiguation_id2name = process_safe(self.disambiguation_id2name)
# self.disambiguation_name2id = process_safe(self.disambiguation_name2id)
# redirect dict: alias -- {id, name}
self.redirects = self.__measure_speed(redirect_fn, self.__read_redirects, 'redirect')
# ment_ent dict: mention -- {total:int,alias:List[str],entities:{id:{freq:float,name:str}}
# self.ment_ent_dict = process_safe(self.__measure_speed(ment_ent_fn,read_big_dict,'mention-entity dictionary'))
start = time.time()
self.ment_ent_dict = read_dict_from_dir(ment_ent_fn)
print('read mention-entity dictionary cost {:.02}s'.format(time.time() - start))
self.persons = self.__measure_speed(person_fn, self.__read_person, 'person names')
def __measure_speed(self, fn, func, name):
start = time.time()
item = func(fn)
print('read {} cost {:.2f}s'.format(name, time.time() - start))
return item
@staticmethod
def __read_person(fn):
persons = []
for line in open(fn):
persons.append(line.strip())
# persons[line.strip()] = None
# persons.add(line.strip())
return persons
@staticmethod
def __read_disambiguation(fn):
disam_id2nme = {}
disam_nme2id = {}
for line in open(fn):
line = line.strip()
wid, name = line.split('\t')
disam_id2nme[int(wid)] = name
disam_nme2id[name] = int(wid)
return disam_id2nme, disam_nme2id
@staticmethod
def __read_redirects(fn):
redirects = {}
for line in open(fn):
alias, name, id = line.strip().split('\t')
redirects[alias] = {'name': name, 'id': id}
return redirects
def main():
conll_root = '/home/data/corpora/nytimes/nyt_preprocess/conll'
parsed_root = '/home/data/corpora/nytimes/nyt_preprocess/parsed'
disam_fn = '/home/hkeaa/data/nel/basic_data/wiki_disambiguation_pages.txt'
name_id_fn = '/home/hkeaa/data/nel/basic_data/wiki_name_id_map.txt'
redirect_fn = '/home/hkeaa/data/nel/basic_data/wiki_redirects.txt'
ment_ent_fn = '/home/hkeaa/ment_ent.dict'
person_fn = '/home/hkeaa/data/nel/basic_data/p_e_m_data/persons.txt'
share_src = LinkSharedSource(disam_fn, redirect_fn, ment_ent_fn, person_fn)
# task = Task({'fn':os.path.j)})
# link(task,share_src)
# pem_dir = '/home/hkeaa/data/nel/generated'
# crosswiki_wikipedia = os.path.join(pem_dir,'crosswikis_wikipedia_p_e_m.txt')
# yago = os.path.join(pem_dir,'yago_p_e_m.txt')
# write_ment_ent_dict([crosswiki_wikipedia],set(corpus.stopwords.words('english')))
if __name__ == '__main__':
main()
|
the-stack_106_29180 | import errno
import os
import gevent.socket
import six
from geventhttpclient import __version__
from geventhttpclient.connectionpool import ConnectionPool
from geventhttpclient.header import Headers
from geventhttpclient.response import HTTPConnectionClosed
from geventhttpclient.response import HTTPSocketPoolResponse
from geventhttpclient.url import URL
CRLF = "\r\n"
WHITESPACE = " "
FIELD_VALUE_SEP = ": "
HOST_PORT_SEP = ":"
SLASH = "/"
PROTO_HTTP = "http"
PROTO_HTTPS = "https"
HEADER_HOST = "Host"
HEADER_CONTENT_LENGTH = "Content-Length"
METHOD_GET = "GET"
METHOD_HEAD = "HEAD"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
METHOD_DELETE = "DELETE"
METHOD_PATCH = "PATCH"
METHOD_OPTIONS = "OPTIONS"
METHOD_TRACE = "TRACE"
def _get_body_length(body):
"""
Get len of string or file
:param body:
:return:
:rtype: int
"""
try:
return len(body)
except TypeError:
try:
return os.fstat(body.fileno()).st_size
except (AttributeError, OSError):
return None
class HTTPClient(object):
HTTP_11 = 'HTTP/1.1'
HTTP_10 = 'HTTP/1.0'
BLOCK_SIZE = 1024 * 4 # 4KB
DEFAULT_HEADERS = Headers({
'User-Agent': 'python/gevent-http-client-' + __version__
})
@classmethod
def from_url(cls, url, **kw):
if not isinstance(url, URL):
url = URL(url)
enable_ssl = url.scheme == PROTO_HTTPS
if not enable_ssl:
kw.pop('ssl_options', None)
return cls(url.host, port=url.port, ssl=enable_ssl, **kw)
def __init__(self, host, port=None, headers=None,
block_size=BLOCK_SIZE,
connection_timeout=ConnectionPool.DEFAULT_CONNECTION_TIMEOUT,
network_timeout=ConnectionPool.DEFAULT_NETWORK_TIMEOUT,
disable_ipv6=False,
concurrency=1,
ssl=False, ssl_options=None, ssl_context_factory=None,
insecure=False,
proxy_host=None, proxy_port=None, version=HTTP_11,
headers_type=Headers):
if headers is None:
headers = {}
self.host = host
self.port = port
connection_host = self.host
connection_port = self.port
if proxy_host is not None:
assert proxy_port is not None, \
'you have to provide proxy_port if you set proxy_host'
self.use_proxy = True
connection_host = proxy_host
connection_port = proxy_port
else:
self.use_proxy = False
if ssl:
ssl_options = ssl_options.copy() if ssl_options else {}
if ssl_options is not None:
if ssl_context_factory is not None:
requested_hostname = headers.get('host', self.host)
ssl_options.setdefault('server_hostname', requested_hostname)
self.ssl = True
if not self.port:
self.port = 443
if not connection_port:
connection_port = self.port
# Import SSL as late as possible, fail hard with Import Error
from geventhttpclient.connectionpool import SSLConnectionPool
self._connection_pool = SSLConnectionPool(
connection_host, connection_port,
self.host, self.port,
size=concurrency,
ssl_options=ssl_options,
ssl_context_factory=ssl_context_factory,
insecure=insecure,
network_timeout=network_timeout,
connection_timeout=connection_timeout,
disable_ipv6=disable_ipv6,
use_proxy=self.use_proxy
)
else:
self.ssl = False
if not self.port:
self.port = 80
if not connection_port:
connection_port = self.port
self._connection_pool = ConnectionPool(
connection_host, connection_port,
self.host, self.port,
size=concurrency,
network_timeout=network_timeout,
connection_timeout=connection_timeout,
disable_ipv6=disable_ipv6,
use_proxy=self.use_proxy
)
self.version = version
self.headers_type = headers_type
self.default_headers = headers_type()
self.default_headers.update(self.DEFAULT_HEADERS)
self.default_headers.update(headers)
self.block_size = block_size
self._base_url_string = str(self.get_base_url())
def get_base_url(self):
url = URL()
url.host = self.host
url.port = self.port
url.scheme = self.ssl and PROTO_HTTPS or PROTO_HTTP
return url
def close(self):
self._connection_pool.close()
# Like urllib2, try to treat the body as a file if we can't determine the
# file length with `len()`
def _build_request(self, method, request_uri, body="", headers=None):
"""
:param method:
:type method: basestring
:param request_uri:
:type request_uri: basestring
:param body:
:type body: basestring or file
:param headers:
:type headers: dict
:return:
:rtype: basestring
"""
if headers is None:
headers = {}
header_fields = self.headers_type()
header_fields.update(self.default_headers)
header_fields.update(headers)
if self.version == self.HTTP_11 and HEADER_HOST not in header_fields:
host_port = self.host
if self.port not in (80, 443):
host_port += HOST_PORT_SEP + str(self.port)
header_fields[HEADER_HOST] = host_port
if body and HEADER_CONTENT_LENGTH not in header_fields:
body_length = _get_body_length(body)
if body_length:
header_fields[HEADER_CONTENT_LENGTH] = body_length
request_url = request_uri
if self.use_proxy:
base_url = self._base_url_string
if request_uri.startswith(SLASH):
base_url = base_url[:-1]
request_url = base_url + request_url
elif not request_url.startswith((SLASH, PROTO_HTTP)):
request_url = SLASH + request_url
elif request_url.startswith(PROTO_HTTP):
if request_url.startswith(self._base_url_string):
request_url = request_url[len(self._base_url_string) - 1:]
else:
raise ValueError("Invalid host in URL")
request = method + WHITESPACE + request_url + WHITESPACE + self.version + CRLF
for field, value in header_fields.items():
request += field + FIELD_VALUE_SEP + str(value) + CRLF
request += CRLF
return request
def request(self, method, request_uri, body=b"", headers=None):
"""
:param method:
:param request_uri:
:param body: byte or file
:param headers:
:return:
"""
if isinstance(body, six.text_type):
body = body.encode('utf-8')
request = self._build_request(
method.upper(), request_uri, body=body, headers=headers)
attempts_left = self._connection_pool.size + 1
while 1:
sock = self._connection_pool.get_socket()
try:
_request = request.encode()
if body:
if isinstance(body, six.binary_type):
sock.sendall(_request + body)
else:
sock.sendall(_request)
# TODO: Support non file-like iterables, e.g. `(u"string1", u"string2")`.
if six.PY3:
sock.sendfile(body)
else:
while True:
chunk = body.read(65536)
if not chunk:
break
sock.sendall(chunk)
else:
sock.sendall(_request)
except gevent.socket.error as e:
self._connection_pool.release_socket(sock)
if (e.errno == errno.ECONNRESET or e.errno == errno.EPIPE) and attempts_left > 0:
attempts_left -= 1
continue
raise e
try:
response = HTTPSocketPoolResponse(sock, self._connection_pool,
block_size=self.block_size, method=method.upper(), headers_type=self.headers_type)
except HTTPConnectionClosed as e:
# connection is released by the response itself
if attempts_left > 0:
attempts_left -= 1
continue
raise e
else:
response._sent_request = request
return response
def get(self, request_uri, headers={}):
return self.request(METHOD_GET, request_uri, headers=headers)
def head(self, request_uri, headers=None):
return self.request(METHOD_HEAD, request_uri, headers=headers)
def post(self, request_uri, body=u'', headers=None):
return self.request(METHOD_POST, request_uri, body=body, headers=headers)
def put(self, request_uri, body=u'', headers=None):
return self.request(METHOD_PUT, request_uri, body=body, headers=headers)
def delete(self, request_uri, body=u'', headers=None):
return self.request(METHOD_DELETE, request_uri, body=body, headers=headers)
def patch(self, request_uri, body=u'', headers=None):
return self.request(METHOD_PATCH, request_uri, body=body, headers=headers)
def trace(self, request_uri, body=u'', headers=None):
return self.request(METHOD_TRACE, request_uri, body=body, headers=headers)
def options(self, request_uri, headers=None):
return self.request(METHOD_OPTIONS, request_uri, headers=headers)
class HTTPClientPool(object):
""" Factory for maintaining a bunch of clients, one per host:port """
# TODO: Add some housekeeping and cleanup logic
def __init__(self, **kwargs):
self.clients = dict()
self.client_args = kwargs
def get_client(self, url):
if not isinstance(url, URL):
url = URL(url)
client_key = url.host, url.port
try:
return self.clients[client_key]
except KeyError:
client = HTTPClient.from_url(url, **self.client_args)
self.clients[client_key] = client
return client
def close(self):
for client in self.clients.values():
client.close()
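# Minimal usage sketch (not part of the original module). The host below is just
# a placeholder; any reachable HTTP server works. It only uses the public API
# defined above (from_url, get, close) and the response's status_code/read().
if __name__ == "__main__":
    client = HTTPClient.from_url("http://httpbin.org")
    try:
        response = client.get("/get")
        print(response.status_code)
        print(response.read())
    finally:
        client.close()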
|
the-stack_106_29182 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""End-to-end test for the streaming wordcount example.
Important: End-to-end test infrastructure for streaming pipeline in Python SDK
is in development and is not yet available for use.
Currently, this test blocks until the job is manually terminated.
"""
import datetime
import logging
import random
import unittest
import uuid
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples import streaming_wordcount
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.runners.runner import PipelineState
from apache_beam.testing import test_utils
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
INPUT_TOPIC = 'wc_topic_input'
OUTPUT_TOPIC = 'wc_topic_output'
INPUT_SUB = 'wc_subscription_input'
OUTPUT_SUB = 'wc_subscription_output'
DEFAULT_INPUT_NUMBERS = 500
class StreamingWordCountIT(unittest.TestCase):
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.project = self.test_pipeline.get_option('project')
self.uuid = str(uuid.uuid4())
# Set up PubSub environment.
from google.cloud import pubsub
self.pubsub_client = pubsub.Client(project=self.project)
self.input_topic = self.pubsub_client.topic(INPUT_TOPIC + self.uuid)
self.output_topic = self.pubsub_client.topic(OUTPUT_TOPIC + self.uuid)
self.input_sub = self.input_topic.subscription(INPUT_SUB + self.uuid)
self.output_sub = self.output_topic.subscription(OUTPUT_SUB + self.uuid)
self.input_topic.create()
self.output_topic.create()
test_utils.wait_for_topics_created([self.input_topic, self.output_topic])
self.input_sub.create()
self.output_sub.create()
def _generate_identifier(self):
seed = random.randint(0, 999)
current_time = datetime.datetime.now().strftime('%m%d%H%M%S')
return '%s%d' % (current_time, seed)
def _inject_numbers(self, topic, num_messages):
"""Inject numbers as test data to PubSub."""
logging.debug('Injecting %d numbers to topic %s',
num_messages, topic.full_name)
for n in range(num_messages):
topic.publish(str(n))
def _cleanup_pubsub(self):
test_utils.cleanup_subscriptions([self.input_sub, self.output_sub])
test_utils.cleanup_topics([self.input_topic, self.output_topic])
def tearDown(self):
self._cleanup_pubsub()
@attr('IT')
def test_streaming_wordcount_it(self):
# Build expected dataset.
expected_msg = [('%d: 1' % num) for num in range(DEFAULT_INPUT_NUMBERS)]
# Set extra options to the pipeline for test purpose
state_verifier = PipelineStateMatcher(PipelineState.RUNNING)
pubsub_msg_verifier = PubSubMessageMatcher(self.project,
OUTPUT_SUB + self.uuid,
expected_msg,
timeout=400)
extra_opts = {'input_subscription': self.input_sub.full_name,
'output_topic': self.output_topic.full_name,
'on_success_matcher': all_of(state_verifier,
pubsub_msg_verifier)}
# Generate input data and inject to PubSub.
test_utils.wait_for_subscriptions_created([self.input_sub])
self._inject_numbers(self.input_topic, DEFAULT_INPUT_NUMBERS)
# Get pipeline options from command argument: --test-pipeline-options,
# and start pipeline job by calling pipeline main function.
streaming_wordcount.run(
self.test_pipeline.get_full_options_as_args(**extra_opts))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
|
the-stack_106_29183 | from typing import Union
class Vehicle:
def __init__(self, type: str, model: str, price: int) -> None:
self.type: str = type
self.model: str = model
self.price: int = price
self.owner: Union[str, None] = None
def buy(self, money: int, owner: str) -> str:
if self.owner is None and self.price <= money:
self.owner = owner
return f'Successfully bought a {self.type}. Change: {money - self.price:.2f}'
elif self.owner is not None:
return 'Car already sold'
elif self.price > money:
return 'Sorry, not enough money'
def sell(self) -> Union[str, None]:
if self.owner is None:
return 'Vehicle has no owner'
else:
self.owner = None
def __repr__(self) -> str:
if self.owner is None:
return f'{self.model} {self.type} is on sale: {self.price}'
else:
return f'{self.model} {self.type} is owned by: {self.owner}'
# vehicle_type = "car"
# model = "BMW"
# price = 30000
# vehicle = Vehicle(vehicle_type,
# model, price)
# print(vehicle.buy(15000, "Peter"))
# print(vehicle.buy(35000, "George"))
# print(vehicle)
# vehicle.sell()
# print(vehicle)
|
the-stack_106_29184 | # encoding = utf-8
import os
import pdb
import time
import numpy as np
import torch
from torch import optim
from torch.autograd import Variable
from dataloader.dataloaders import train_dataloader, val_dataloader
from network import get_model
from eval import evaluate
from options import opt
from scheduler import schedulers
from utils import init_log, seed_everything, load_meta, save_meta
from mscv.summary import create_summary_writer, write_meters_loss, write_image
from mscv.image import tensor2im
# from utils.send_sms import send_notification
import misc_utils as utils
import random
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
# Initialization
with torch.no_grad():
# Set the random seed
if opt.seed is not None:
seed_everything(opt.seed)
# Initialize output paths
save_root = os.path.join(opt.checkpoint_dir, opt.tag)
log_root = os.path.join(opt.log_dir, opt.tag)
utils.try_make_dir(save_root)
utils.try_make_dir(log_root)
# dataloader
train_dataloader = train_dataloader
val_dataloader = val_dataloader
# Initialize logging
logger = init_log(training=True)
# Initialize the training meta info
meta = load_meta(new=True)
save_meta(meta)
# Initialize the model
Model = get_model(opt.model)
model = Model(opt, logger)
# Multi-GPU is not supported yet
# if len(opt.gpu_ids):
# model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model = model.to(device=opt.device)
if opt.load:
load_epoch = model.load(opt.load)
start_epoch = load_epoch + 1 if opt.resume else 1
else:
start_epoch = 1
model.train()
# Start training
print('Start training...')
start_step = (start_epoch - 1) * len(train_dataloader)
global_step = start_step
total_steps = opt.epochs * len(train_dataloader)
start = time.time()
# Define the scheduler
scheduler = model.scheduler
# TensorBoard logging
writer = create_summary_writer(log_root)
start_time = time.time()
# Log the transforms
logger.info('train_trasforms: ' +str(train_dataloader.dataset.transforms))
logger.info('===========================================')
if val_dataloader is not None:
logger.info('val_trasforms: ' +str(val_dataloader.dataset.transforms))
logger.info('===========================================')
# Log the scheduler
if opt.scheduler in schedulers:
logger.info('scheduler: (Lambda scheduler)\n' + str(schedulers[opt.scheduler]))
logger.info('===========================================')
# Training loop
try:
eval_result = ''
for epoch in range(start_epoch, opt.epochs + 1):
for iteration, sample in enumerate(train_dataloader):
global_step += 1
# Estimate the remaining time
rate = (global_step - start_step) / (time.time() - start)
remaining = (total_steps - global_step) / rate
# In --debug mode, train only 10 batches per epoch
if opt.debug and iteration > 10:
break
sample['global_step'] = global_step
# Update the network parameters
updated = model.update(sample)
predicted = updated.get('predicted')
pre_msg = 'Epoch:%d' % epoch
# Show the progress bar
msg = f'lr:{round(scheduler.get_lr()[0], 6) : .6f} (loss) {str(model.avg_meters)} ETA: {utils.format_time(remaining)}'
utils.progress_bar(iteration, len(train_dataloader), pre_msg, msg)
# print(pre_msg, msg)
if global_step % 1000 == 0:  # write losses to TensorBoard every 1000 steps
write_meters_loss(writer, 'train', model.avg_meters, global_step)
# Write the training log
logger.info(f'Train epoch: {epoch}, lr: {round(scheduler.get_lr()[0], 6) : .6f}, (loss) ' + str(model.avg_meters))
if epoch % opt.save_freq == 0 or epoch == opt.epochs:  # always save at the last epoch
model.save(epoch)
# Run validation during training
if not opt.no_eval and epoch % opt.eval_freq == 0:
model.eval()
evaluate(model, val_dataloader, epoch, writer, logger, data_name='val')
model.train()
if scheduler is not None:
scheduler.step()
# Save the run-finished info
if opt.tag != 'cache':
with open('run_log.txt', 'a') as f:
f.writelines(' Accuracy:' + eval_result + '\n')
meta = load_meta()
meta[-1]['finishtime'] = utils.get_time_stamp()
save_meta(meta)
except Exception as e:
# if not opt.debug:  # no SMS is sent in debug mode; 12 is the SMS template length limit
# send_notification([opt.tag[:12], str(e)[:12]], template='error')
if opt.tag != 'cache':
with open('run_log.txt', 'a') as f:
f.writelines(' Error: ' + str(e)[:120] + '\n')
meta = load_meta()
meta[-1]['finishtime'] = utils.get_time_stamp()
save_meta(meta)
# print(e)
raise Exception('Error')  # re-raise so the earlier traceback still gets printed
except:  # other exceptions, e.g. keyboard interrupt
meta = load_meta()
meta[-1]['finishtime'] = utils.get_time_stamp()
save_meta(meta) |
the-stack_106_29186 | # Copyright 2017 Presys Instrumentos e Sistemas Ltda.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Renato José da Silva"
__credits__ = ["Renato José da Silva, Ricardo Lopes de Goes"]
__version__ = "1.0.0"
__maintainer__ = "Renato José da Silva"
__email__ = "[email protected]"
from http.client import HTTPConnection
from base64 import b64encode
import os
ipConfigFile = 'ip_address_config.txt'
port = 5000
url = '/pconserver/pages/setpconmode.cgi?type={}'
method = 'GET'
print('Select the desired mode:')
print('1 - Control')
print('2 - Measure')
print('3 - Vent')
print('4 - Reset')
option = input('')
mode = ""
if option == '1':
mode = 'CONTROL'
elif option == '2':
mode = 'MEASURE'
elif option == '3':
mode = "VENT"
elif option == '4':
mode = "RESETPRESSON"
print("Mode: " + mode)
username = 'admin'
password = 'xvmaster'
currentDirectory = os.path.dirname(__file__)
fullPath = os.path.join(currentDirectory, ipConfigFile)
file = open(fullPath)
ipValue = file.read()
connection = HTTPConnection(ipValue, port=port)
authKey = b64encode((username+":"+password).encode('utf-8')).decode('utf-8')
headers = {"Authorization":"Basic " + authKey}
connection.request(method, url.format(mode), headers=headers)
response = connection.getresponse()
print("http://" + ipValue + ":" + str(port) + url.format(mode))
print(response.read().decode())
|
the-stack_106_29187 | # from https://github.com/amdegroot/ssd.pytorch
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
from PIL import Image
from torchvision.transforms import functional as F
from torchvision.transforms import Normalize as TorchNormalize
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
jaccard overlap: Shape: [box_a.shape[0]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1])) # [A,B]
area_b = ((box_b[2] - box_b[0]) *
(box_b[3] - box_b[1])) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
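# Worked example (illustrative, not part of the original module):
#
#   box_a = np.array([[0., 0., 2., 2.]])   # area 4
#   box_b = np.array([1., 1., 3., 3.])     # area 4, 1x1 overlap with box_a
#   jaccard_numpy(box_a, box_b)            # -> array([0.14285714])  i.e. 1 / 7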
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, boxes=None, labels=None):
for t in self.transforms:
img, boxes, labels = t(img, boxes, labels)
return img, boxes, labels
class Lambda(object):
"""Applies a lambda as a transform."""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img, boxes=None, labels=None):
return self.lambd(img, boxes, labels)
class ConvertFromInts(object):
def __call__(self, image, boxes=None, labels=None):
return image.astype(np.float32), boxes, labels
class SubtractMeans(object):
def __init__(self, mean):
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, boxes=None, labels=None):
image = image.astype(np.float32)
image -= self.mean
return image.astype(np.float32), boxes, labels
class ToAbsoluteCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] *= width
boxes[:, 2] *= width
boxes[:, 1] *= height
boxes[:, 3] *= height
return image, boxes, labels
class ToPercentCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] /= width
boxes[:, 2] /= width
boxes[:, 1] /= height
boxes[:, 3] /= height
return image, boxes, labels
class Normalize(object):
def __init__(self, mean=0, std=1):
self.mean = mean
self.std = std
def __call__(self, image, boxes=None, labels=None):
image = image-self.mean
image = image/self.std
return image, boxes, labels
class Resize(object):
def __init__(self, size=300):
self.size = size
def __call__(self, image, boxes=None, labels=None):
image = cv2.resize(image, (self.size,
self.size))
return image, boxes, labels
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "saturation upper must be >= lower."
assert self.lower >= 0, "saturation lower must be non-negative."
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 1] *= random.uniform(self.lower, self.upper)
return image, boxes, labels
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 0] += random.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
return image, boxes, labels
class RandomLightingNoise(object):
def __init__(self):
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
swap = self.perms[random.randint(len(self.perms))]
shuffle = SwapChannels(swap) # shuffle channels
image = shuffle(image)
return image, boxes, labels
class ConvertColor(object):
def __init__(self, current, transform):
self.transform = transform
self.current = current
def __call__(self, image, boxes=None, labels=None):
if self.current == 'BGR' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif self.current == 'RGB' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif self.current == 'BGR' and self.transform == 'RGB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.current == 'HSV' and self.transform == 'BGR':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
elif self.current == 'HSV' and self.transform == "RGB":
image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
else:
raise NotImplementedError
return image, boxes, labels
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return image, boxes, labels
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return image, boxes, labels
class ToCV2Image(object):
def __call__(self, tensor, boxes=None, labels=None):
return tensor.cpu().numpy().astype(np.float32).transpose((1, 2, 0)), boxes, labels
class ToTensor(object):
def __call__(self, cvimage, boxes=None, labels=None):
return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), boxes, labels
class RandomSampleCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
boxes (Tensor): the original bounding boxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, boxes, classes)
img (Image): the cropped image
boxes (Tensor): the adjusted bounding boxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self):
self.sample_options = (
# using entire original input image
None,
# sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
def __call__(self, image, boxes=None, labels=None):
# guard against no boxes
if boxes is not None and boxes.shape[0] == 0:
return image, boxes, labels
height, width, _ = image.shape
while True:
# randomly choose a mode
mode = random.choice(self.sample_options)
if mode is None:
return image, boxes, labels
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
# max trails (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
# aspect ratio constraint b/t .5 & 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(width - w)
top = random.uniform(height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left + w), int(top + h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = jaccard_numpy(boxes, rect)
# is min and max overlap constraint satisfied? if not try again
# check that at least 1 gtbox has IoU acceptable considering the constraints
if overlap.max() < min_iou or overlap.min() > max_iou:
continue
# cut the crop from the image
current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],
:]
# keep overlap with gt box IF center in sampled patch
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
# mask in all gt boxes that above and to the left of centers
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
# mask in all gt boxes that under and to the right of centers
m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])
# mask in that both m1 and m2 are true
mask = m1 * m2
# have any valid boxes? try again if not
if not mask.any():
continue
# take only matching gt boxes
current_boxes = boxes[mask, :].copy()
# take only matching gt labels
current_labels = labels[mask]
# should we use the box left and top corner or the crop's
current_boxes[:, :2] = np.maximum(current_boxes[:, :2],
rect[:2])
# adjust to crop (by subtracting crop's left,top)
current_boxes[:, :2] -= rect[:2]
current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:],
rect[2:])
# adjust to crop (by subtracting crop's left,top)
current_boxes[:, 2:] -= rect[:2]
return current_image, current_boxes, current_labels
class Expand(object):
def __init__(self, mean):
self.mean = mean
def __call__(self, image, boxes, labels):
if random.randint(2):
return image, boxes, labels
height, width, depth = image.shape
ratio = random.uniform(1, 4)
left = random.uniform(0, width * ratio - width)
top = random.uniform(0, height * ratio - height)
expand_image = np.zeros(
(int(height * ratio), int(width * ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = self.mean
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
boxes = boxes.copy()
boxes[:, :2] += (int(left), int(top))
boxes[:, 2:] += (int(left), int(top))
return image, boxes, labels
class RandomMirror(object):
def __call__(self, image, boxes, classes):
_, width, _ = image.shape
if random.randint(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return image, boxes, classes
class SwapChannels(object):
"""Transforms a tensorized image by swapping the channels in the order
specified in the swap tuple.
Args:
swaps (int triple): final order of channels
eg: (2, 1, 0)
"""
def __init__(self, swaps):
self.swaps = swaps
def __call__(self, image):
"""
Args:
image (Tensor): image tensor to be transformed
Return:
a tensor with channels swapped according to swap
"""
# if torch.is_tensor(image):
# image = image.data.cpu().numpy()
# else:
# image = np.array(image)
image = image[:, :, self.swaps]
return image
class PhotometricDistort(object):
def __init__(self):
self.pd = [
RandomContrast(), # RGB
ConvertColor(current="RGB", transform='HSV'), # HSV
RandomSaturation(), # HSV
RandomHue(), # HSV
ConvertColor(current='HSV', transform='RGB'), # RGB
RandomContrast() # RGB
]
self.rand_brightness = RandomBrightness()
self.rand_light_noise = RandomLightingNoise()
def __call__(self, image, boxes, labels):
im = image.copy()
im, boxes, labels = self.rand_brightness(im, boxes, labels)
if random.randint(2):
distort = Compose(self.pd[:-1])
else:
distort = Compose(self.pd[1:])
im, boxes, labels = distort(im, boxes, labels)
return self.rand_light_noise(im, boxes, labels)
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img, bboxes=None, labels=None):
return self.lambd(img), bboxes, labels
def __repr__(self):
return self.__class__.__name__ + '()'
class SSDColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = random.uniform(-hue, hue)
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img, bboxes=None, bboxes_labels=None):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
PILimg = Image.fromarray(img.astype(np.uint8))
transformed, _, _ = transform(PILimg)
array = np.array(transformed.convert("RGB"))
# execute at the end a randomlighting noise as per original paper
last_trans = RandomLightingNoise()
image, _, _ = last_trans(array.astype(np.float32))
return image, bboxes, bboxes_labels
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
class ToNormalDomain(object):
def __call__(self, image, bboxes=None, labels=None):
image = image / 255
return image, bboxes, labels
class ToPixelDomain(object):
def __call__(self, image, bboxes=None, labels=None):
image = image * 255
return image, bboxes, labels
class DeNormalize(object):
def __init__(self, mean, std, reshape=False):
"""
if reshape:
self.mean = torch.Tensor(mean)
self.std = torch.Tensor(std)
else:
"""
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.reshape = reshape
def __call__(self, tensor):
if self.reshape:
tensor, _, _ = (ToCV2Image())(tensor)
tensor = tensor*self.std
tensor = tensor+self.mean
if self.reshape:
tensor, _, _ = (ToTensor())(tensor)
return tensor
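# Minimal sketch (not part of the original module) showing how these transforms
# compose into an SSD-style training pipeline; the mean, box and label values
# below are made-up numbers.
if __name__ == "__main__":
    mean = (104, 117, 123)
    augment = Compose([
        ConvertFromInts(),
        Expand(mean),
        RandomSampleCrop(),
        RandomMirror(),
        ToPercentCoords(),
        Resize(300),
        SubtractMeans(mean),
    ])
    image = np.random.randint(0, 255, (300, 300, 3)).astype(np.float32)
    boxes = np.array([[30., 40., 120., 160.]])
    labels = np.array([1])
    out_image, out_boxes, out_labels = augment(image, boxes, labels)
    print(out_image.shape, out_boxes, out_labels)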
|
the-stack_106_29188 | from bs4 import BeautifulSoup
import numpy as np
import requests
import re
import time
import random
class TradeList(object):
"""
A web scraper to extract sales attributes from autotrader.co.uk
Specify the make, model, postcode and search radius for the vehicle
Part of a larger machine learning sales analytics project.
"""
def __init__(self, make, car_model, postcode, radius):
self.make = make
self.car_model = car_model
self.postcode = postcode
self.radius = radius
self.url_format = "http://www.autotrader.co.uk/search/used/cars/{make}/{car_model}/" \
"postcode/{postcode}/radius/{radius}/searchcontext/default/sort/" \
"priceasc/onesearchad/new%2Cnearlynew%2Cused/page/{page_num}"
self.soup = None
self.print_intro()
def print_intro(self):
print('=' * 8)
print('Autotrader Scraping Tool - Luke Woods 2016')
print('Vehicle: {}, {}. Searching... {} miles around {}.'
.format(self.make, self.car_model, self.radius, self.postcode))
def get_url(self, page_num=1):
"""Creates and returns the URL"""
return self.url_format.format(make=self.make, car_model=self.car_model, postcode=self.postcode,
radius=self.radius, page_num=page_num)
def load_page(self, page_num=1):
"""Runs BeautifulSoup module on the results page."""
r = requests.get(self.get_url(page_num))
self.soup = BeautifulSoup(r.text, "html.parser")
def get_num_pages(self):
"""Returns the number of pages of results."""
page_res = self.soup.html.body.findAll('li', {'class': 'paginationMini__count'})
page_text = ''.join([i.text for i in page_res])
page_nums = (page_text.split(' ')[-1])
return page_nums
def get_prices(self):
"""Returns the car prices in a numpy array"""
price_range = self.soup.html.body.findAll('div', {'class': 'search-result__price'})
prices = np.zeros(12)
k = 0
for i in price_range:
# convert string into integer
prices[k] = int(re.sub('[^\d\.]', '', i.text[1:]))
k += 1
# remove first and last entries (ads)
prices = prices[1:-1]
print("Prices extracted ✓")
return prices
def get_attributes(self):
"""Returns car attributes"""
attributes = self.soup.html.body.findAll('ul', {'class': 'search-result__attributes'})
summary = []
category = []
for k in range(1, 11):
car_attr = ''
c = 0
for counter, item in enumerate(attributes[k].findAll('li')):
if attributes[k].find(class_='js-tooltip') and counter == 0:
c = 1
continue
else:
car_attr += item.text + ' '
category.append(c)
summary.append(car_attr)
return summary, category
def get_url_ids(self):
"""Return unique url ID."""
links = self.soup.html.body.findAll('a', {'class': 'gui-test-search-result-link'})
url_id = [''] * 10
k = 0
for link in links:
url_id[k] = str(re.findall('\d+', link.get('href'))[0])
k += 1
return url_id
def get_urls(self):
"""Return the url."""
links = self.soup.html.body.findAll('a', {'class': 'gui-test-search-result-link'})
url = [''] * 10
k = 0
for link in links:
url[k] = link.get('href')
k += 1
return url
def run(self, listings, pages=3, start_page=1, delay=1):
"""Loops over search results and returns an array of vehicle attributes"""
price_array = np.array([])
attr_array = np.array([], dtype=object)
url_id_array = np.array([])
url_array = np.array([])
category_array = np.array([])
print('='*8)
print('BEGIN LOOP')
ts = time.time()
for page_num in range(start_page, start_page + pages):
listings.load_page(page_num)
print(listings.get_url(page_num))
if page_num == start_page:
print(" → of {} total pages".format(listings.get_num_pages()))
print('')
print("Processing page {} of {}...".format(page_num, pages))
try:
# Append attributes into array
price_array = np.append(price_array, listings.get_prices())
attr_array = np.append(attr_array, listings.get_attributes()[0])
category_array = np.append(category_array, listings.get_attributes()[1])
url_id_array = np.append(url_id_array, listings.get_url_ids())
url_array = np.append(url_array, listings.get_urls())
except Exception as e:
print('An error occurred on page {}: {}'.format(page_num, e))
print(len(price_array), len(attr_array), len(url_id_array), len(url_array))
# Sleep delay
random_sleep = delay + delay*(random.randint(0, 1000) / 1000)
print('({:0.4} s delay)'.format(random_sleep))
time.sleep(random_sleep)
print('')
print('Time taken: {:0.2} s'.format(time.time() - ts))
return price_array, attr_array, url_id_array, url_array, category_array
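# Minimal usage sketch (not part of the original module). The make/model/postcode
# below are made-up values, and this performs live requests against
# autotrader.co.uk, whose markup may have changed since the scraper was written.
if __name__ == "__main__":
    listings = TradeList("ford", "fiesta", "SW1A1AA", radius=50)
    prices, attrs, url_ids, urls, categories = listings.run(listings, pages=1)
    print(len(prices), "prices scraped")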
|
the-stack_106_29189 | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
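# Illustrative note (not part of the original file): bitwise_xor returns the
# elementwise xor reversed, while bitwise_dot is the inner product mod 2, e.g.
#
#   bitwise_xor("110", "000")   # -> "011" (xor is "110", returned reversed)
#   bitwise_dot("111", "101")   # -> "0"   ((1*1 + 1*0 + 1*1) % 2)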
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.y(input_qubit[1]) # number=37
prog.h(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.z(input_qubit[3]) # number=27
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[3],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.h(input_qubit[2]) # number=36
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[1]) # number=38
prog.y(input_qubit[1]) # number=39
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2807.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_106_29190 | '''
___ _ ___ __ _______
/\/\ _ __ / _ \ | __ _ _ _ ___ _ __ / __\ / / \_ _ /
/ \| '__/ /_)/ |/ _` | | | |/ _ \ '__|____ / / / / / /
/ /\/\ \ | / ___/| | (_| | |_| | __/ | |_____/ /___/ /___/\/ /_
\/ \/_| \/ |_|\__,_|\__, |\___|_| \____/\____/\____/
|___/
'''
##############################################################################
# (c) Akshat Chauhan ,2021 #
# This is the command line interface for MrPlayer #
# this can play any Mp3 file #
# MrPlayer can be downloaded from https://AkshatChauhan18.github.io/MrPlayer #
##############################################################################
import argparse
import pygame
from rich import console
from rich.syntax import Syntax
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
from prompt_toolkit.layout import HSplit, Layout, VSplit
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import Box, Button, Frame, Label, TextArea
import lyricsgenius
import os
import os.path
import shutil
pygame.mixer.init()
console = console.Console()
dest_f = f"{os.path.expanduser('~')}\\Music\\MrPlayer-songs"
def player(args):
if args.playsong:
pygame.mixer.music.load(args.playsong.strip())
pygame.mixer.music.play()
def play_clicked():
pygame.mixer.music.unpause()
def pause_clicked():
pygame.mixer.music.pause()
def exit_clicked():
pygame.mixer.music.stop()
get_app().exit()
def rewind():
pygame.mixer.music.rewind()
# All the widgets for the UI.
play_btn = Button("Play", handler=play_clicked)
pause_btn = Button("Pause", handler=pause_clicked)
rewind_btn = Button("Rewind", handler=rewind)
exit_btn = Button("Exit", handler=exit_clicked)
text_area = TextArea(focusable=False, height=10, width=70)
text_area.text = '''
___ _ ___ __ _______
/\/\ _ __ / _ \ | __ _ _ _ ___ _ __ / __\ / / \_ _ /
/ \| '__/ /_)/ |/ _` | | | |/ _ \ '__|____ / / / / / /
/ /\/\ \ | / ___/| | (_| | |_| | __/ | |_____/ /___/ /___/\/ /_
\/ \/_| \/ |_|\__,_|\__, |\___|_| \____/\____/\____/
|___/
'''
copy_right_label = Label(text='Copyright (c)\n2021,\nAkshat Chauhan')
song_name = Label(text=f"Now playing {args.playsong}")
# Combine all the widgets in a UI.
# The `Box` object ensures that padding will be inserted around the containing
# widget. It adapts automatically, unless an explicit `padding` amount is given.
root_container = Box(
HSplit(
[
Label(text="Press `Tab` to move down and `Shift+Tab` to move up"),
VSplit(
[
Box(
body=HSplit(
[play_btn, pause_btn, rewind_btn, exit_btn, copy_right_label], padding=1),
padding=1,
style="class:left-pane",
),
Box(
HSplit(
[
Box(
body=Frame(
text_area
)
),
Box(
body=Frame(song_name)
)
]) , padding=1,
style="class:right-pane"),
]
)
]
),
)
layout = Layout(container=root_container, focused_element=pause_btn)
# Key bindings.
kb = KeyBindings()
kb.add("tab")(focus_next)
kb.add("s-tab")(focus_previous)
# Styling.
style = Style(
[
("left-pane", "bg:#888800 #000000"),
("right-pane", "bg:#00aa00 #000000"),
("button", "#000000"),
("button-arrow", "#000000"),
("button focused", "bg:#ff0000"),
("text-area focused", "bg:#ff0000"),
]
)
# Build a main application object.
application = Application(
layout=layout, key_bindings=kb, style=style, full_screen=True)
application.run()
if args.getlyrics:
song=None
try:
api_key = open(
f"{os.path.expanduser('~')}\\.MrPlayer\\api_key.txt").read()
genius = lyricsgenius.Genius(api_key)
if args.singer:
song = genius.search_song(args.getlyrics.strip(), args.singer.strip())
else:
song = genius.search_song(args.getlyrics.strip(),'')
lyrics = song.lyrics
console.rule(f'[bold red]{args.getlyrics}')
console.print(lyrics)
except:
console.print_exception()
if args.version:
console.print('[green]v1.1.0')
if args.sourcecode:
console.rule('[bold red]Code')
code_file = Syntax.from_path(f"{os.path.expanduser('~')}\\.MrPlayer\\mpc.py",line_numbers=True) # this file will be created by installer
console.print(code_file)
if args.addtrack:
try:
file = str(args.addtrack)
if file.endswith('.mp3'):
if args.movetrack:
console.print('[cyan]Moving track ...')
shutil.move(f'{file}', dest_f)
console.print('[green]Done')
else:
console.print('[cyan]Coping track ...')
shutil.copy(f'{file}', dest_f)
console.print('[green]Done')
else:
console.print('[red]Sorry not a ".mp3" file !')
except Exception as e:
console.print(f'[red]{e}')
if args.addfolder:
files = os.listdir(args.addfolder)
try:
if args.movetrack:
for track in files:
if track.endswith('.mp3'):
console.print(f'[cyan]Moving track [yellow]{track} ...')
shutil.move(f'{args.addfolder}\\{track}', dest_f)
console.print('[green]Done')
else:
for track in files:
if track.endswith('.mp3'):
console.print(f'[cyan]Coping track [yellow]{track} ...')
shutil.copy(f'{args.addfolder}\\{track}', dest_f)
console.print('[green]Done')
except Exception as e:
console.print(f'[red]{e}')
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument('-ps', '--playsong', help='This command is for playing mp3')
parse.add_argument('-gl', '--getlyrics', help='This command gets lyrics')
parse.add_argument('-si', '--singer', help='This command is used with --gl command to enter singer')
parse.add_argument('-v', '--version', help='This command shows current version',action='store_true')
parse.add_argument('-sc', '--sourcecode', help='This command shows the source code.',action='store_true')
parse.add_argument('-at', '--addtrack', help='This command is used to add a sound track to the MrPlayer folder.')
parse.add_argument('-af', '--addfolder', help='This command is used to add all sound tracks of the specified folder to the MrPlayer folder')
parse.add_argument('-m', '--movetrack', help= 'This command is used with "-at" & "-af" to move sound track instead of copying sound track', action='store_true')
args = parse.parse_args()
player(args)
|
the-stack_106_29191 | """
LIF (Leaky integrate-and-fire) Neuron model
Copyright(c) HiroshiARAKI
"""
import numpy as np
import matplotlib.pyplot as plt
from .neuron import Neuron
from ..tools import kernel
class LIF(Neuron):
"""
LIF: leaky integrate-and-fire model
"""
def __init__(self,
time: int,
dt: float = 1.0,
rest=-65,
th=-40,
ref=3,
tc_decay=100,
k='single',
tau: tuple = (20, ),
**kwargs):
"""
Initialize Neuron parameters
:param time: experimental time
:param dt: time step
:param rest: resting potential
:param th: threshold
:param ref: refractory period
:param tc_decay: time constant
:param k: kernel {'single', 'double'}
:param tau: exponential decays as tuple(tau_1 ,tau_2) or float
"""
super().__init__(time, dt)
if k not in ['single', 'double']:
print('Unknown kernel; falling back to "single".')
k = 'single'
self.rest = kwargs.get('rest', rest)
self.th = kwargs.get('th', th)
self.ref = kwargs.get('ref', ref)
self.tc_decay = kwargs.get('tc_decay', tc_decay)
self.monitor = {}
self.kernel = kernel[kwargs.get('k', k)] # default: single exp filter
self.tau = tau if type(tau) is tuple else (tau, )
def calc_v(self, data):
"""
Calculate Membrane Voltage
:param data: tuple(spikes[], weight[])
:return:
"""
spikes = np.array(data[0])
weights = np.array(data[1])
data = [
spikes[i] * weights[i]
for i in range(weights.size)
]
time = int(self.time / self.dt)
data = np.sum(data, 0)
data = np.convolve(data,
self.kernel(np.arange(0, self.time, self.dt),
self.tau)
)[0:time]
# initialize
f_last = 0 # last firing time
vpeak = 20 # the peak of membrane voltage
spikes = np.zeros(time)
v = self.rest # set to resting voltage
v_monitor = [] # monitor voltage
# Core of LIF
for t in range(time):
dv = ((self.dt * t) > (f_last + self.ref)) * (-v + self.rest) / self.tc_decay + data[t]
v = v + self.dt * dv # calc voltage
f_last = f_last + (self.dt * t - f_last) * (v >= self.th) # if fires, memory the firing time
v = v + (vpeak - v) * (v >= self.th) # set to peak
v_monitor.append(v)
spikes[t] = (v >= self.th) * 1 # set to spike
v = v + (self.rest - v) * (v >= self.th) # return to resting voltage
self.monitor['s'] = spikes
self.monitor['v'] = v_monitor
        self.monitor['f'] = np.arange(0, self.time, self.dt)[spikes.astype(bool)]  # real firing times
return v_monitor, spikes, self.monitor['f']
def plot_v(self, save=False, filename='lif.png', **kwargs):
"""
plot membrane potential
:param save:
:param filename:
:param kwargs:
:return:
"""
x = np.arange(0, self.time, self.dt)
plt.title('LIF Neuron model Simulation')
plt.plot(x, self.monitor['v'])
plt.ylabel('V [mV]')
plt.xlabel('time [ms]')
if not save:
plt.show()
else:
plt.savefig(filename, dpi=kwargs.get('dpi', 150))
plt.close()
class IF(LIF):
"""
IF: integrate-and-fire model
"""
def __init__(self,
time: int,
dt: float = 1.0,
rest=-65,
th=-40,
ref=3,
k='single',
tau: tuple = (20, ),
**kwargs):
"""
Initialize Neuron parameters
:param time:
:param dt:
:param rest:
:param th:
:param ref:
:param k:
:param tau:
:param kwargs:
"""
super().__init__(time=time,
dt=dt,
rest=rest,
th=th,
ref=ref,
k=k,
tau=tau,
**kwargs)
def calc_v(self, data):
"""
Calculate Membrane Voltage
:param data: tuple(spikes[], weight[])
:return membrane voltage, output spikes, firing times:
"""
spikes = np.array(data[0])
weights = np.array(data[1])
peak = 20 # the peak of membrane voltage
f_last = -100 # last firing time
t_ref = int(self.ref / self.dt) # refractory period [x dt ms]
v = np.zeros(int(self.time / self.dt)) + self.rest # all membrane voltage set to resting vol.
input_spikes = np.array([
spikes[i] * weights[i]
for i in range(weights.size)
])
input_spikes = np.sum(input_spikes, 0)
for t, s in enumerate(input_spikes):
if s: # if fires,
# and be not in refractory period, calculate voltage
v[t:] += (t > (f_last + t_ref)) * s * self.kernel(np.arange(0, v[t:].size, 1) * self.dt, self.tau)
if v[t] >= self.th:
v[t] = peak
v[t+1:] = self.rest # return to resting voltage
f_last = t # memory the firing time
self.monitor = {
'v': v,
's': [v >= self.th], # boolean spike trains
'f': np.arange(0, self.time, self.dt)[v >= self.th], # real firing times
}
return v, self.monitor['s'], self.monitor['f']
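# Illustrative usage sketch (not part of the original module; it assumes the
# package layout implied by the relative imports above so that LIF is importable):
#   import numpy as np
#   time, dt, n_inputs = 500, 0.5, 10
#   input_spikes = (np.random.rand(n_inputs, int(time / dt)) < 0.02).astype(float)
#   weights = np.random.rand(n_inputs) * 5.0
#   neuron = LIF(time=time, dt=dt, k='double', tau=(10, 2.5))
#   v, out_spikes, firing_times = neuron.calc_v((input_spikes, weights))
#   neuron.plot_v()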
|
the-stack_106_29195 | from kit.dependency_file import *
class probability:
    def set_exp_pr(self, pop, pressure):
        """Assign exponential (Boltzmann-style) selection probabilities to each individual."""
        self.pop = pop
        sigma = 0
        for x in pop:
            sigma += math.exp(-pressure * x['evaluation'])
        for x in pop:
            x['pr'] = math.exp(-pressure * x['evaluation']) / sigma
        return pop
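# Sketch of the quantity computed above, written out as a formula (my reading of
# the code, not taken from the original file): for selection pressure p and
# evaluation value e_i of individual i,
#     pr_i = exp(-p * e_i) / sum_j exp(-p * e_j)
# i.e. a Boltzmann-style distribution that gives lower-evaluation (lower-cost)
# individuals a higher selection probability.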
|
the-stack_106_29196 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Dict, Any, Tuple
import torch
import torch.distributed as dist
from torch.distributed._sharding_spec import ShardMetadata
from torchrec.distributed.dist_data import (
PooledEmbeddingsReduceScatter,
SequenceEmbeddingAllToAll,
)
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
GroupedEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
group_tables,
SparseFeaturesAllToAll,
BasePooledEmbeddingDist,
BaseSparseFeaturesDist,
EmbeddingSharding,
BaseSequenceEmbeddingDist,
SequenceShardingContext,
BaseEmbeddingLookup,
bucketize_kjt_before_all2all,
)
from torchrec.distributed.embedding_types import (
SparseFeaturesList,
ShardedEmbeddingTable,
GroupedEmbeddingConfig,
SparseFeatures,
EmbeddingComputeKernel,
BaseGroupedFeatureProcessor,
)
from torchrec.distributed.types import (
ShardedTensorMetadata,
Awaitable,
ParameterSharding,
)
from torchrec.modules.embedding_configs import EmbeddingTableConfig
class RwSparseFeaturesDist(BaseSparseFeaturesDist[SparseFeatures]):
def __init__(
self,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
pg: dist.ProcessGroup,
num_id_list_features: int,
num_id_score_list_features: int,
id_list_feature_hash_sizes: List[int],
id_score_list_feature_hash_sizes: List[int],
device: Optional[torch.device] = None,
is_sequence: bool = False,
has_feature_processor: bool = False,
) -> None:
super().__init__()
self._world_size: int = pg.size()
self._num_id_list_features = num_id_list_features
self._num_id_score_list_features = num_id_score_list_features
id_list_feature_block_sizes = [
(hash_size + self._world_size - 1) // self._world_size
for hash_size in id_list_feature_hash_sizes
]
id_score_list_feature_block_sizes = [
(hash_size + self._world_size - 1) // self._world_size
for hash_size in id_score_list_feature_hash_sizes
]
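        # Illustrative example of the ceil-division above (not from the original
        # file): with hash_size=10 and world_size=4 each block covers
        # (10 + 4 - 1) // 4 == 3 rows, so ids 0-2 map to rank 0, ids 3-5 to
        # rank 1, and so on; these block sizes drive the bucketization applied
        # in forward().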
self.register_buffer(
"_id_list_feature_block_sizes_tensor",
torch.tensor(
id_list_feature_block_sizes,
device=device,
dtype=torch.int32,
),
)
self.register_buffer(
"_id_score_list_feature_block_sizes_tensor",
torch.tensor(
id_score_list_feature_block_sizes,
device=device,
dtype=torch.int32,
),
)
self._dist = SparseFeaturesAllToAll(
pg,
self._world_size * [self._num_id_list_features],
self._world_size * [self._num_id_score_list_features],
device,
)
self._is_sequence = is_sequence
self._has_feature_processor = has_feature_processor
self.unbucketize_permute_tensor: Optional[torch.Tensor] = None
def forward(
self,
sparse_features: SparseFeatures,
) -> Awaitable[Awaitable[SparseFeatures]]:
if self._num_id_list_features > 0:
assert sparse_features.id_list_features is not None
(
id_list_features,
self.unbucketize_permute_tensor,
) = bucketize_kjt_before_all2all(
sparse_features.id_list_features,
num_buckets=self._world_size,
block_sizes=self._id_list_feature_block_sizes_tensor,
output_permute=self._is_sequence,
bucketize_pos=self._has_feature_processor,
)
else:
id_list_features = None
if self._num_id_score_list_features > 0:
assert sparse_features.id_score_list_features is not None
id_score_list_features, _ = bucketize_kjt_before_all2all(
sparse_features.id_score_list_features,
num_buckets=self._world_size,
block_sizes=self._id_score_list_feature_block_sizes_tensor,
output_permute=False,
bucketize_pos=False,
)
else:
id_score_list_features = None
bucketized_sparse_features = SparseFeatures(
id_list_features=id_list_features,
id_score_list_features=id_score_list_features,
)
return self._dist(bucketized_sparse_features)
class RwPooledEmbeddingDist(BasePooledEmbeddingDist[torch.Tensor]):
def __init__(
self,
pg: dist.ProcessGroup,
) -> None:
super().__init__()
self._dist = PooledEmbeddingsReduceScatter(pg)
def forward(self, local_embs: torch.Tensor) -> Awaitable[torch.Tensor]:
return self._dist(local_embs)
class RwSequenceEmbeddingDist(BaseSequenceEmbeddingDist):
def __init__(
self,
pg: dist.ProcessGroup,
num_features: int,
device: Optional[torch.device] = None,
) -> None:
super().__init__()
self._dist = SequenceEmbeddingAllToAll(pg, [num_features] * pg.size(), device)
def forward(
self, sharding_ctx: SequenceShardingContext, local_embs: torch.Tensor
) -> Awaitable[torch.Tensor]:
return self._dist(
local_embs,
lengths=sharding_ctx.lengths_after_input_dist,
input_splits=sharding_ctx.input_splits,
output_splits=sharding_ctx.output_splits,
unbucketize_permute_tensor=sharding_ctx.unbucketize_permute_tensor,
)
class RwEmbeddingSharding(
EmbeddingSharding[
SparseFeatures, torch.Tensor, SparseFeaturesList, List[torch.Tensor]
]
):
"""
    Shards embedding bags row-wise, i.e. a given embedding table is evenly distributed
by rows and table slices are placed on all ranks.
"""
def __init__(
self,
embedding_configs: List[
Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
],
pg: dist.ProcessGroup,
device: Optional[torch.device] = None,
is_sequence: bool = False,
) -> None:
super().__init__()
# pyre-fixme[4]: Attribute must be annotated.
self._pg = pg
if device is None:
device = torch.device("cpu")
self._device = device
self._is_sequence = is_sequence
sharded_tables_per_rank = self._shard(embedding_configs)
self._grouped_embedding_configs_per_rank: List[
List[GroupedEmbeddingConfig]
] = []
self._score_grouped_embedding_configs_per_rank: List[
List[GroupedEmbeddingConfig]
] = []
(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
) = group_tables(sharded_tables_per_rank)
self._grouped_embedding_configs: List[
GroupedEmbeddingConfig
] = self._grouped_embedding_configs_per_rank[dist.get_rank(pg)]
self._score_grouped_embedding_configs: List[
GroupedEmbeddingConfig
] = self._score_grouped_embedding_configs_per_rank[dist.get_rank(pg)]
self._has_feature_processor: bool = False
for group_config in self._grouped_embedding_configs:
if group_config.has_feature_processor:
self._has_feature_processor = True
def _shard(
self,
embedding_configs: List[
Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
],
) -> List[List[ShardedEmbeddingTable]]:
world_size = self._pg.size()
tables_per_rank: List[List[ShardedEmbeddingTable]] = [
[] for i in range(world_size)
]
for config in embedding_configs:
# pyre-fixme [16]
shards = config[1].sharding_spec.shards
# construct the global sharded_tensor_metadata
global_metadata = ShardedTensorMetadata(
shards_metadata=shards,
size=torch.Size([config[0].num_embeddings, config[0].embedding_dim]),
)
for rank in range(world_size):
tables_per_rank[rank].append(
ShardedEmbeddingTable(
num_embeddings=config[0].num_embeddings,
embedding_dim=config[0].embedding_dim,
name=config[0].name,
embedding_names=config[0].embedding_names,
data_type=config[0].data_type,
feature_names=config[0].feature_names,
pooling=config[0].pooling,
is_weighted=config[0].is_weighted,
has_feature_processor=config[0].has_feature_processor,
local_rows=shards[rank].shard_sizes[0],
local_cols=config[0].embedding_dim,
compute_kernel=EmbeddingComputeKernel(config[1].compute_kernel),
local_metadata=shards[rank],
global_metadata=global_metadata,
weight_init_max=config[0].weight_init_max,
weight_init_min=config[0].weight_init_min,
)
)
return tables_per_rank
def create_train_input_dist(self) -> BaseSparseFeaturesDist[SparseFeatures]:
num_id_list_features = self._get_id_list_features_num()
num_id_score_list_features = self._get_id_score_list_features_num()
id_list_feature_hash_sizes = self._get_id_list_features_hash_sizes()
id_score_list_feature_hash_sizes = self._get_id_score_list_features_hash_sizes()
return RwSparseFeaturesDist(
pg=self._pg,
num_id_list_features=num_id_list_features,
num_id_score_list_features=num_id_score_list_features,
id_list_feature_hash_sizes=id_list_feature_hash_sizes,
id_score_list_feature_hash_sizes=id_score_list_feature_hash_sizes,
device=self._device,
is_sequence=self._is_sequence,
has_feature_processor=self._has_feature_processor,
)
def create_train_lookup(
self,
fused_params: Optional[Dict[str, Any]],
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup:
if self._is_sequence:
return GroupedEmbeddingsLookup(
grouped_configs=self._grouped_embedding_configs,
fused_params=fused_params,
pg=self._pg,
device=self._device,
)
else:
return GroupedPooledEmbeddingsLookup(
grouped_configs=self._grouped_embedding_configs,
grouped_score_configs=self._score_grouped_embedding_configs,
fused_params=fused_params,
pg=self._pg,
device=self._device,
feature_processor=feature_processor,
)
def create_train_pooled_output_dist(
self,
device: Optional[torch.device] = None,
) -> RwPooledEmbeddingDist:
return RwPooledEmbeddingDist(self._pg)
def create_train_sequence_output_dist(self) -> RwSequenceEmbeddingDist:
return RwSequenceEmbeddingDist(
self._pg,
self._get_id_list_features_num(),
self._device,
)
def embedding_dims(self) -> List[int]:
embedding_dims = []
for grouped_config in self._grouped_embedding_configs:
embedding_dims.extend(grouped_config.embedding_dims())
for grouped_config in self._score_grouped_embedding_configs:
embedding_dims.extend(grouped_config.embedding_dims())
return embedding_dims
def embedding_names(self) -> List[str]:
embedding_names = []
for grouped_config in self._grouped_embedding_configs:
embedding_names.extend(grouped_config.embedding_names())
for grouped_config in self._score_grouped_embedding_configs:
embedding_names.extend(grouped_config.embedding_names())
return embedding_names
def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
embedding_shard_metadata = []
for grouped_config in self._grouped_embedding_configs:
embedding_shard_metadata.extend(grouped_config.embedding_shard_metadata())
for grouped_config in self._score_grouped_embedding_configs:
embedding_shard_metadata.extend(grouped_config.embedding_shard_metadata())
return embedding_shard_metadata
def id_list_feature_names(self) -> List[str]:
id_list_feature_names = []
for grouped_config in self._grouped_embedding_configs:
id_list_feature_names.extend(grouped_config.feature_names())
return id_list_feature_names
def id_score_list_feature_names(self) -> List[str]:
id_score_list_feature_names = []
for grouped_config in self._score_grouped_embedding_configs:
id_score_list_feature_names.extend(grouped_config.feature_names())
return id_score_list_feature_names
def _get_id_list_features_num(self) -> int:
return sum(
group_config.num_features()
for group_config in self._grouped_embedding_configs
)
def _get_id_score_list_features_num(self) -> int:
return sum(
group_config.num_features()
for group_config in self._score_grouped_embedding_configs
)
def _get_id_list_features_hash_sizes(self) -> List[int]:
id_list_feature_hash_sizes: List[int] = []
for group_config in self._grouped_embedding_configs:
id_list_feature_hash_sizes.extend(group_config.feature_hash_sizes())
return id_list_feature_hash_sizes
def _get_id_score_list_features_hash_sizes(self) -> List[int]:
id_score_list_feature_hash_sizes: List[int] = []
for group_config in self._score_grouped_embedding_configs:
id_score_list_feature_hash_sizes.extend(group_config.feature_hash_sizes())
return id_score_list_feature_hash_sizes
|
the-stack_106_29199 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. The first request's
hashstop value refers to a validated block, while the second request's hashstop
value refers to a block which hasn't been validated. Verifies that only the
first request returns headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.p2p import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
p2p_lock,
MSG_BLOCK,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(MSG_BLOCK, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(MSG_BLOCK, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
self.wait_until(test_function, timeout=timeout)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with p2p_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
self.wait_until(test_function)
with p2p_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
self.wait_until(test_function)
with p2p_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generatetoaddress(count, self.nodes[0].get_deterministic_priv_key().address)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
# make sure all invalidated blocks are node0's
self.nodes[0].generatetoaddress(length, self.nodes[0].get_deterministic_priv_key().address)
self.sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generatetoaddress(length + 1, self.nodes[1].get_deterministic_priv_key().address) # Must be longer than the orig chain
self.sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_and_ping(msg_block(new_block)) # make sure this block is processed
inv_node.wait_until(lambda: inv_node.block_announced)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for _ in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with p2p_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for _ in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 2
blocks = []
# Create extra blocks for later
for _ in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with p2p_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with p2p_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for _ in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
|
the-stack_106_29200 | # Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack import wsgi
from nova.tests.functional import api_paste_fixture
from nova.tests.functional import integrated_helpers
class LegacyV2CompatibleTestBase(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
def setUp(self):
self.useFixture(api_paste_fixture.ApiPasteV2CompatibleFixture())
super(LegacyV2CompatibleTestBase, self).setUp()
self._check_api_endpoint('/v2', [compute.APIRouterV21,
openstack.LegacyV2CompatibleWrapper])
def test_request_with_microversion_headers(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
def test_request_without_addtional_properties_check(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test", "foooooo": "barrrrrr"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
|
the-stack_106_29202 | import logging
from tests.helpers.log_helper import log_operation
def create_namespace(database, namespace_name):
"""
Create a namespace
"""
db, db_name = database
log_operation.info(f"Create a namespace with name '{namespace_name}' on database '{db_name}'")
try:
db.namespace_open(namespace_name)
except Exception as e:
logging.error(e)
def drop_namespace(database, namespace_name):
"""
Drop a namespace
"""
db, db_name = database
log_operation.info(f"Drop a namespace with name '{namespace_name}' on database '{db_name}'")
db.namespace_drop(namespace_name)
def get_namespace_list(database):
"""
Get list of namespaces in database
"""
log_operation.info("Get list of namespaces in database")
db, db_name = database
namespace_list = db.namespaces_enum()
return namespace_list
def get_ns_description(database, namespace):
"""
Get information about namespace in database
"""
db, namespace_name = namespace
namespace_list = get_namespace_list(database)
log_operation.info(f"Get information about namespace {namespace_name} in database")
ns_entry = list(filter(lambda ns: ns['name'] == namespace_name, namespace_list))
return ns_entry
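# Illustrative usage sketch (assumes `db` is an already-opened reindexer
# connection object, matching the (db, name) tuples unpacked above):
#   database = (db, 'test_db')
#   create_namespace(database, 'items')
#   print(get_namespace_list(database))
#   print(get_ns_description(database, (db, 'items')))
#   drop_namespace(database, 'items')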
|
the-stack_106_29205 | import cv2 as cv
import numpy as np
class Blender:
BLENDER_CHOICES = ('multiband', 'feather', 'no',)
DEFAULT_BLENDER = 'multiband'
DEFAULT_BLEND_STRENGTH = 5
def __init__(self, blender_type=DEFAULT_BLENDER,
blend_strength=DEFAULT_BLEND_STRENGTH):
self.blender_type = blender_type
self.blend_strength = blend_strength
self.blender = None
def prepare(self, corners, sizes):
dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
blend_width = (np.sqrt(dst_sz[2] * dst_sz[3]) *
self.blend_strength / 100)
if self.blender_type == 'no' or blend_width < 1:
self.blender = cv.detail.Blender_createDefault(
cv.detail.Blender_NO
)
elif self.blender_type == "multiband":
self.blender = cv.detail_MultiBandBlender()
self.blender.setNumBands((np.log(blend_width) /
np.log(2.) - 1.).astype(np.int))
elif self.blender_type == "feather":
self.blender = cv.detail_FeatherBlender()
self.blender.setSharpness(1. / blend_width)
self.blender.prepare(dst_sz)
def feed(self, img, mask, corner):
"""https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a""" # noqa
self.blender.feed(cv.UMat(img.astype(np.int16)), mask, corner)
def blend(self):
"""https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00""" # noqa
result = None
result_mask = None
result, result_mask = self.blender.blend(result, result_mask)
result = cv.convertScaleAbs(result)
return result
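# Illustrative usage sketch (not part of the original file; it assumes warped
# images, masks, top-left corners and sizes produced by earlier stitching steps):
#   blender = Blender(blender_type='multiband', blend_strength=5)
#   blender.prepare(corners, sizes)
#   for img, mask, corner in zip(images, masks, corners):
#       blender.feed(img, mask, corner)
#   panorama = blender.blend()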
|
the-stack_106_29208 | from pfunk import Collection, StringField, EnumField, Enum, ReferenceField, SlugField
from pfunk.resources import Index
from pfunk.contrib.auth.collections import User, Group
from pfunk.contrib.auth.resources import GenericGroupBasedRole, GenericUserBasedRole
GENDER_PRONOUN = Enum(name='gender_pronouns', choices=['he', 'her', 'they'])
class SimpleIndex(Index):
name = 'simple-index'
terms = ['name', 'slug']
unique = True
source = 'Project'
class Sport(Collection):
_use_crud_functions = True
name = StringField(required=True)
slug = SlugField()
def __unicode__(self):
return self.name
class Meta:
unique_together = [('name', 'slug')]
class Person(Collection):
_roles = [GenericGroupBasedRole]
_verbose_plural_name = 'people'
first_name = StringField(required=True)
last_name = StringField(required=True)
gender_pronoun = EnumField(GENDER_PRONOUN)
sport = ReferenceField(Sport)
group = ReferenceField(Group)
def __unicode__(self):
return f"{self.first_name} {self.last_name}"
class House(Collection):
_roles = [GenericUserBasedRole]
address = StringField(required=True)
user = ReferenceField(User)
def __unicode__(self):
return self.address |
the-stack_106_29210 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow.compat.v1 as tf
from . import modeling
def _get_initializer(FLAGS):
"""Get variable intializer."""
if FLAGS.init == 'uniform':
initializer = tf.initializers.random_uniform(
minval=-FLAGS.init_range, maxval=FLAGS.init_range, seed=None
)
elif FLAGS.init == 'normal':
initializer = tf.initializers.random_normal(
stddev=FLAGS.init_std, seed=None
)
else:
raise ValueError('Initializer {} not supported'.format(FLAGS.init))
return initializer
class XLNetConfig(object):
"""XLNetConfig contains hyperparameters that are specific to a model checkpoint;
i.e., these hyperparameters should be the same between
pretraining and finetuning.
The following hyperparameters are defined:
n_layer: int, the number of layers.
d_model: int, the hidden size.
n_head: int, the number of attention heads.
d_head: int, the dimension size of each attention head.
d_inner: int, the hidden size in feed-forward layers.
ff_activation: str, "relu" or "gelu".
untie_r: bool, whether to untie the biases in attention.
n_token: int, the vocab size.
"""
def __init__(self, FLAGS=None, json_path=None):
"""Constructing an XLNetConfig.
One of FLAGS or json_path should be provided."""
assert FLAGS is not None or json_path is not None
self.keys = [
'n_layer',
'd_model',
'n_head',
'd_head',
'd_inner',
'ff_activation',
'untie_r',
'n_token',
]
if FLAGS is not None:
self.init_from_flags(FLAGS)
if json_path is not None:
self.init_from_json(json_path)
def init_from_flags(self, FLAGS):
for key in self.keys:
setattr(self, key, getattr(FLAGS, key))
def init_from_json(self, json_path):
with tf.gfile.Open(json_path) as f:
json_data = json.load(f)
for key in self.keys:
setattr(self, key, json_data[key])
def to_json(self, json_path):
"""Save XLNetConfig to a json file."""
json_data = {}
for key in self.keys:
json_data[key] = getattr(self, key)
json_dir = os.path.dirname(json_path)
if not tf.gfile.Exists(json_dir):
tf.gfile.MakeDirs(json_dir)
with tf.gfile.Open(json_path, 'w') as f:
json.dump(json_data, f, indent=4, sort_keys=True)
def create_run_config(is_training, is_finetune, FLAGS):
kwargs = dict(
is_training=is_training,
use_tpu=FLAGS.use_tpu,
use_bfloat16=FLAGS.use_bfloat16,
dropout=FLAGS.dropout,
dropatt=FLAGS.dropatt,
init=FLAGS.init,
init_range=FLAGS.init_range,
init_std=FLAGS.init_std,
clamp_len=FLAGS.clamp_len,
)
if not is_finetune:
kwargs.update(
dict(
mem_len=FLAGS.mem_len,
reuse_len=FLAGS.reuse_len,
bi_data=FLAGS.bi_data,
clamp_len=FLAGS.clamp_len,
same_length=FLAGS.same_length,
)
)
return RunConfig(**kwargs)
class RunConfig(object):
"""RunConfig contains hyperparameters that could be different
between pretraining and finetuning.
These hyperparameters can also be changed from run to run.
We store them separately from XLNetConfig for flexibility.
"""
def __init__(
self,
is_training,
use_tpu,
use_bfloat16,
dropout,
dropatt,
init='normal',
init_range=0.1,
init_std=0.02,
mem_len=None,
reuse_len=None,
bi_data=False,
clamp_len=-1,
same_length=False,
):
"""
Args:
is_training: bool, whether in training mode.
use_tpu: bool, whether TPUs are used.
use_bfloat16: bool, use bfloat16 instead of float32.
dropout: float, dropout rate.
dropatt: float, dropout rate on attention probabilities.
init: str, the initialization scheme, either "normal" or "uniform".
init_range: float, initialize the parameters with a uniform distribution
in [-init_range, init_range]. Only effective when init="uniform".
init_std: float, initialize the parameters with a normal distribution
with mean 0 and stddev init_std. Only effective when init="normal".
mem_len: int, the number of tokens to cache.
reuse_len: int, the number of tokens in the currect batch to be cached
and reused in the future.
bi_data: bool, whether to use bidirectional input pipeline.
Usually set to True during pretraining and False during finetuning.
clamp_len: int, clamp all relative distances larger than clamp_len.
-1 means no clamping.
same_length: bool, whether to use the same attention length for each token.
"""
self.init = init
self.init_range = init_range
self.init_std = init_std
self.is_training = is_training
self.dropout = dropout
self.dropatt = dropatt
self.use_tpu = use_tpu
self.use_bfloat16 = use_bfloat16
self.mem_len = mem_len
self.reuse_len = reuse_len
self.bi_data = bi_data
self.clamp_len = clamp_len
self.same_length = same_length
class XLNetModel(object):
"""A wrapper of the XLNet model used during both pretraining and finetuning."""
def __init__(
self,
xlnet_config,
run_config,
input_ids,
seg_ids,
input_mask,
mems=None,
perm_mask=None,
target_mapping=None,
inp_q=None,
**kwargs
):
"""
Args:
xlnet_config: XLNetConfig,
run_config: RunConfig,
input_ids: int32 Tensor in shape [len, bsz], the input token IDs.
seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs.
input_mask: float32 Tensor in shape [len, bsz], the input mask.
0 for real tokens and 1 for padding.
mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
from previous batches. The length of the list equals n_layer.
If None, no memory is used.
perm_mask: float32 Tensor in shape [len, len, bsz].
If perm_mask[i, j, k] = 0, i attend to j in batch k;
if perm_mask[i, j, k] = 1, i does not attend to j in batch k.
If None, each position attends to all the others.
target_mapping: float32 Tensor in shape [num_predict, len, bsz].
If target_mapping[i, j, k] = 1, the i-th predict in batch k is
on the j-th token.
Only used during pretraining for partial prediction.
Set to None during finetuning.
inp_q: float32 Tensor in shape [len, bsz].
1 for tokens with losses and 0 for tokens without losses.
Only used during pretraining for two-stream attention.
Set to None during finetuning.
"""
initializer = _get_initializer(run_config)
tfm_args = dict(
n_token=xlnet_config.n_token,
initializer=initializer,
attn_type='bi',
n_layer=xlnet_config.n_layer,
d_model=xlnet_config.d_model,
n_head=xlnet_config.n_head,
d_head=xlnet_config.d_head,
d_inner=xlnet_config.d_inner,
ff_activation=xlnet_config.ff_activation,
untie_r=xlnet_config.untie_r,
is_training=run_config.is_training,
use_bfloat16=run_config.use_bfloat16,
use_tpu=run_config.use_tpu,
dropout=run_config.dropout,
dropatt=run_config.dropatt,
mem_len=run_config.mem_len,
reuse_len=run_config.reuse_len,
bi_data=run_config.bi_data,
clamp_len=run_config.clamp_len,
same_length=run_config.same_length,
)
input_args = dict(
inp_k=input_ids,
seg_id=seg_ids,
input_mask=input_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
inp_q=inp_q,
)
tfm_args.update(input_args)
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
(
self.output,
self.new_mems,
self.lookup_table,
self.lookup_table_2,
) = modeling.transformer_xl(**tfm_args)
self.input_mask = input_mask
self.initializer = initializer
self.xlnet_config = xlnet_config
self.run_config = run_config
def get_pooled_out(self, summary_type, use_summ_proj=True):
"""
Args:
summary_type: str, "last", "first", "mean", or "attn". The method
to pool the input to get a vector representation.
use_summ_proj: bool, whether to use a linear projection during pooling.
Returns:
float32 Tensor in shape [bsz, d_model], the pooled representation.
"""
xlnet_config = self.xlnet_config
run_config = self.run_config
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
summary = modeling.summarize_sequence(
summary_type=summary_type,
hidden=self.output,
d_model=xlnet_config.d_model,
n_head=xlnet_config.n_head,
d_head=xlnet_config.d_head,
dropout=run_config.dropout,
dropatt=run_config.dropatt,
is_training=run_config.is_training,
input_mask=self.input_mask,
initializer=self.initializer,
use_proj=use_summ_proj,
)
return summary
def get_sequence_output(self):
"""
Returns:
float32 Tensor in shape [len, bsz, d_model]. The last layer hidden
representation of XLNet.
"""
return self.output
def get_new_memory(self):
"""
Returns:
list of float32 Tensors in shape [mem_len, bsz, d_model], the new
memory that concatenates the previous memory with the current input
representations.
The length of the list equals n_layer.
"""
return self.new_mems
def get_embedding_table(self):
"""
Returns:
float32 Tensor in shape [n_token, d_model]. The embedding lookup table.
Used for tying embeddings between input and output layers.
"""
return self.lookup_table
def get_embedding_table2(self):
"""
Returns:
float32 Tensor in shape [n_token, d_model]. The embedding lookup table.
Used for tying embeddings between input and output layers.
"""
return self.lookup_table_2
def get_initializer(self):
"""
Returns:
A tf initializer. Used to initialize variables in layers on top of XLNet.
"""
return self.initializer
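# Illustrative usage sketch (tensor shapes follow the docstrings above; the
# config path and hyper-parameter values are placeholders, not from this file):
#   xlnet_config = XLNetConfig(json_path='xlnet_config.json')
#   run_config = RunConfig(is_training=False, use_tpu=False, use_bfloat16=False,
#                          dropout=0.1, dropatt=0.1)
#   model = XLNetModel(xlnet_config=xlnet_config, run_config=run_config,
#                      input_ids=input_ids,    # int32 [len, bsz]
#                      seg_ids=seg_ids,        # int32 [len, bsz]
#                      input_mask=input_mask)  # float32 [len, bsz]
#   pooled = model.get_pooled_out('last')      # [bsz, d_model]
#   seq_out = model.get_sequence_output()      # [len, bsz, d_model]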
|
the-stack_106_29213 | __version__ = '1.0.0b4'
import io
import cmd
import sys
import readline
import shlex
import traceback
import importlib.util
from argparse import _SubParsersAction
from argparse import _HelpAction, Action
from contextlib import redirect_stdout
# Replace sys.exit so that argparse's help/error handling cannot terminate the interactive shell
sys.exit = lambda: None
################################################################################
class ArgumentCmd(cmd.Cmd):
argument_parser = None
# ==========================================================================
def __init__(self):
super(ArgumentCmd, self).__init__()
readline.set_completer_delims(' ')
ArgumentCmd.read_prompt()
# ==========================================================================
def _do_command(self, line, *args, **kwargs):
parser = self.argument_parser
prog = kwargs["prog"].strip()
try:
command = f'{prog} {line}'.strip()
spec = parser.parse_args(shlex.split(command))
if 'func' not in spec:
return
if any(x in command for x in ('-h', '--help')):
return
spec.func(spec)
except TypeError:
            # parse_args raises an exception on invalid input.
return
except Exception as e:
traceback.print_exc()
# Todo: Do something for errors
return
# ==========================================================================
def emptyline(self):
pass
# ==========================================================================
def postcmd(self, stop: bool, line: str) -> bool:
ArgumentCmd.read_prompt()
return cmd.Cmd.postcmd(self, stop, line)
# ==========================================================================
def complete(self, text, state):
if state == 0:
origline = readline.get_line_buffer()
line = origline.lstrip()
command, args, foo = self.parseline(line)
if command == '':
result = self.completedefault
else:
result = ArgumentCmd.get_complete_list(
shlex.split(line), self.argument_parser)
result = [x + ' ' for x in result]
result.append(None)
self.completion_matches = result
try:
return self.completion_matches[state]
except IndexError:
return None
# ==========================================================================
@staticmethod
def get_complete_list(line, parser, result=None):
if None is result:
result = list()
word = ''
if line:
word = line.pop(0)
for action in parser._actions:
if isinstance(action, _HelpAction):
continue
elif isinstance(action, _SubParsersAction):
rc = [x for x in action.choices.keys() if x.startswith(word)]
if 1 == len(rc) and word == rc[0]:
return ArgumentCmd.get_complete_list(
line, action.choices[word], list())
result += [x for x in action.choices.keys() if x.startswith(word)]
elif isinstance(action, Action):
result += [x for x in action.option_strings if x.startswith(word)]
else:
pass
return result
# ==========================================================================
@staticmethod
def get_empty_func(_type, action, parser):
def do(self, line):
self._do_command(self, line, prog=action.prog)
def complete(self, text, line, start_index, end_index):
return self._complete_command(
self, text, line, start_index, end_index)
f = {'do': do, 'complete': complete}
return f[_type]
# ==========================================================================
@classmethod
def _add_functions(cls, actions, parser):
for action in actions.choices.values():
with io.StringIO() as buf, redirect_stdout(buf):
print(action.print_usage())
output = buf.getvalue()
for _type in ('do',):
command = ArgumentCmd.get_empty_func(
_type, action=action, parser=parser)
command.__doc__ = output
command.__name__ = f"{_type}_{action.prog.split()[-1].strip()}"
setattr(cls, command.__name__, classmethod(command))
# ==========================================================================
@classmethod
def _add_command(cls, parser):
for action in parser._actions:
if isinstance(action, _HelpAction):
continue
if isinstance(action, _SubParsersAction):
ArgumentCmd._add_functions(action, parser)
# ==========================================================================
@classmethod
def set_cli_parser(cls, parser):
cls.argument_parser = parser
cls._add_command(parser)
# ==========================================================================
@staticmethod
def read_prompt():
try:
with open('prompt', 'r') as f:
prompt = f.read()
if prompt:
ArgumentCmd.prompt = prompt + ' '
except FileNotFoundError:
pass
return ArgumentCmd.prompt
################################################################################
def run(cli_package):
# Import user CLI package
module_name = 'cli'
spec = importlib.util.spec_from_file_location('cli', cli_package)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
cli_parser = module.argument_parser()
# Run CLI-Arguments
command = ArgumentCmd
command.set_cli_parser(cli_parser)
my_cmd = command()
my_cmd.cmdloop()
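# Illustrative usage sketch (assumes a user-supplied cli.py that exposes an
# argument_parser() factory returning an argparse.ArgumentParser with
# sub-commands, which is what run() loads above):
#   # cli.py
#   # def argument_parser():
#   #     parser = argparse.ArgumentParser()
#   #     sub = parser.add_subparsers()
#   #     hello = sub.add_parser('hello')
#   #     hello.set_defaults(func=lambda spec: print('hello'))
#   #     return parser
#   run('/path/to/cli.py')   # starts the interactive shell with a `hello` command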
|
the-stack_106_29214 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
Parsing for the moduli file, which contains Diffie-Hellman prime groups.
Maintainer: Paul Swartz
"""
def parseModuliFile(filename):
lines = open(filename).readlines()
primes = {}
for l in lines:
l = l.strip()
if not l or l[0]=='#':
continue
tim, typ, tst, tri, size, gen, mod = l.split()
size = int(size) + 1
gen = long(gen)
mod = long(mod, 16)
if not primes.has_key(size):
primes[size] = []
primes[size].append((gen, mod))
return primes
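# Illustrative layout of a moduli file line as parsed above (fields are
# whitespace-separated; the modulus value is a placeholder):
#   20030516060000 2 6 100 1535 2 <hex modulus>
# e.g. parseModuliFile('/etc/ssh/moduli')[1536] would then hold the list of
# (generator, modulus) pairs for the 1536-bit group, since the stored key is
# size + 1. Note that this is Python 2-era code (long(), dict.has_key()).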
|
the-stack_106_29215 |
#importing the necessary modules
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
#calculating the mean
def mean(values):
    return sum(values)/float(len(values))
#calculating the sum of squared deviations from the mean (used as the variance term in the slope formula)
def variance(values,mean):
    return sum([(x-mean)**2 for x in values])
#calculating the coefficients b1 and b0 for the equation yhat = b0 + b1*x
def coefficients(iris_X,mean_iris_X,iris_Y,mean_iris_Y,var_iris_X):
n = len(iris_X)
numerator = 0
for i in range(n):
numerator += (iris_X[i] - mean_iris_X) * (iris_Y[i] - mean_iris_Y)
b1 = numerator/var_iris_X
b0 = mean_iris_Y - (b1 * mean_iris_X)
return(b1,b0)
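# The closed-form simple linear regression estimates computed above, written
# out as formulas (sketch):
#     b1 = sum_i (x_i - mean_x) * (y_i - mean_y) / sum_i (x_i - mean_x)**2
#     b0 = mean_y - b1 * mean_x
# variance() as defined above returns the un-normalised sum of squared
# deviations, which is exactly the denominator required for b1.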
#training the model and predicting values of the dependent variable
def linear_regression(iris_X_train,iris_Y_train,iris_X_test,iris_Y_test):
    predection = []
    mean_iris_X , mean_iris_Y = mean(iris_X_train), mean(iris_Y_train)
    var_iris_X = variance(iris_X_train, mean_iris_X)
    # fit the coefficients once on the training data, then reuse them for every test point
    b1, b0 = coefficients(iris_X_train, mean_iris_X, iris_Y_train, mean_iris_Y, var_iris_X)
    for x in iris_X_test:
        yhat = b0 + b1*x
        predection.append(yhat)
return predection
#calculating the mean squared error
def calc_error(predected,iris_Y_test):
    m = len(predected)
    error = 0
    # pair each prediction with its matching test label
    for i, ele in enumerate(predected):
        error += ((ele - iris_Y_test[i])**2 / m)
    return error
#main function
iris_X , iris_Y = datasets.load_iris(return_X_y = True) #loading the dataset from the sklearn
iris_X = iris_X[:,np.newaxis,2] #keeping only petal length (column 2) as the single feature
iris_X_train,iris_X_test,iris_Y_train,iris_Y_test = train_test_split(iris_X,
iris_Y,
test_size=.32,random_state=30) #splitting the data into test and train datset
predected = linear_regression(iris_X_train,iris_Y_train,iris_X_test,iris_Y_test) #calling the linear_regression function to predict the values of y
print("Mean squared error : ", calc_error(predected,iris_Y_test)) #displaying the error
#visualising the model
plt.scatter(iris_X_test,iris_Y_test, color = 'red')
plt.plot(iris_X_test,predected, color = 'blue',linewidth = 2)
#displaying the plot on screen
plt.show() |
the-stack_106_29218 | # Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains all the units for the spec.
This file loads swagger and JSON schema files and parses out the useful bits
and returns them as Units for use in Batesian.
For the actual conversion of data -> RST (including templates), see the sections
file instead.
"""
from batesian.units import Units
from collections import OrderedDict
import logging
import json
import os
import os.path
import re
import subprocess
import sys
import yaml
from functools import reduce
from six.moves.urllib.parse import urlencode, quote
matrix_doc_dir=reduce(lambda acc,_: os.path.dirname(acc),
range(1, 5), os.path.abspath(__file__))
HTTP_APIS = {
os.path.join(matrix_doc_dir, "api/application-service"): "as",
os.path.join(matrix_doc_dir, "api/client-server"): "cs",
os.path.join(matrix_doc_dir, "api/identity"): "is",
os.path.join(matrix_doc_dir, "api/push-gateway"): "push",
os.path.join(matrix_doc_dir, "api/server-server"): "ss",
}
SWAGGER_DEFINITIONS = {
os.path.join(matrix_doc_dir, "api/application-service/definitions"): "as",
os.path.join(matrix_doc_dir, "api/client-server/definitions"): "cs",
os.path.join(matrix_doc_dir, "api/identity/definitions"): "is",
os.path.join(matrix_doc_dir, "api/push-gateway/definitions"): "push",
os.path.join(matrix_doc_dir, "api/server-server/definitions"): "ss",
}
EVENT_EXAMPLES = os.path.join(matrix_doc_dir, "event-schemas/examples")
EVENT_SCHEMA = os.path.join(matrix_doc_dir, "event-schemas/schema")
CORE_EVENT_SCHEMA = os.path.join(matrix_doc_dir, "event-schemas/schema/core-event-schema")
CHANGELOG_DIR = os.path.join(matrix_doc_dir, "changelogs")
TARGETS = os.path.join(matrix_doc_dir, "specification/targets.yaml")
ROOM_EVENT = "core-event-schema/room_event.yaml"
STATE_EVENT = "core-event-schema/state_event.yaml"
SAS_EMOJI_JSON = os.path.join(matrix_doc_dir, "data-definitions/sas-emoji.json")
logger = logging.getLogger(__name__)
# a yaml Loader which loads mappings into OrderedDicts instead of regular
# dicts, so that we preserve the ordering of properties from the api files.
#
# with thanks to http://stackoverflow.com/a/21912744/637864
class OrderedLoader(yaml.Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
pairs = loader.construct_pairs(node)
return OrderedDict(pairs)
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
class TypeTable(object):
"""Describes a table documenting an object type
Attributes:
title(str|None): Title of the table - normally the object type
desc(str|None): description of the object
rows(list[TypeTableRow]): the rows in the table
"""
def __init__(self, title=None, desc=None, rows=[]):
self.title=title
self.desc=desc
self._rows = []
for row in rows:
self.add_row(row)
def add_row(self, row):
if not isinstance(row, TypeTableRow):
raise ValueError("Can only add TypeTableRows to TypeTable")
self._rows.append(row)
def __getattr__(self, item):
if item == 'rows':
return list(self._rows)
return super(TypeTable, self).__getattr__(item)
def __repr__(self):
return "TypeTable[%s, rows=%s]" % (self.title, self._rows)
class TypeTableRow(object):
"""Describes an object field defined in the json schema
"""
def __init__(self, key, title, desc, required=False):
self.key = key
self.title = title
self.desc = desc
self.required = required
def __repr__(self):
return "TypeTableRow[%s: %s]" % (self.key, self.desc)
def resolve_references(path, schema):
if isinstance(schema, dict):
# do $ref first
if '$ref' in schema:
value = schema['$ref']
path = os.path.join(os.path.dirname(path), value)
with open(path, encoding="utf-8") as f:
ref = yaml.load(f, OrderedLoader)
result = resolve_references(path, ref)
del schema['$ref']
else:
result = OrderedDict()
for key, value in schema.items():
result[key] = resolve_references(path, value)
return result
elif isinstance(schema, list):
return [resolve_references(path, value) for value in schema]
else:
return schema
def inherit_parents(obj):
"""
Recurse through the 'allOf' declarations in the object
"""
logger.debug("inherit_parents %r" % obj)
parents = obj.get("allOf", [])
if not parents:
return obj
result = {}
# settings defined in the child take priority over the parents, so we
# iterate through the parents first, and then overwrite with the settings
# from the child.
for p in list(map(inherit_parents, parents)) + [obj]:
# child blats out type, title and description
for key in ('type', 'title', 'description'):
if p.get(key):
result[key] = p[key]
# other fields get merged
for key in ('required', ):
if p.get(key):
result.setdefault(key, []).extend(p[key])
for key in ('properties', 'additionalProperties', 'patternProperties'):
if p.get(key):
result.setdefault(key, OrderedDict()).update(p[key])
return result
def get_json_schema_object_fields(obj, enforce_title=False):
"""Parse a JSON schema object definition
Args:
obj(dict): definition from the JSON schema file. $refs should already
have been resolved.
enforce_title (bool): if True, and the definition has no "title",
the 'title' result will be set to 'NO_TITLE' (otherwise it will be
set to None)
Returns:
dict: with the following fields:
- title (str): title (normally the type name) for the object
- tables (list[TypeTable]): list of the tables for the type
definition
"""
# Algorithm:
# f.e. property => add field info (if field is object then recurse)
if obj.get("type") != "object":
raise Exception(
"get_json_schema_object_fields: Object %s isn't an object." % obj
)
obj_title = obj.get("title")
logger.debug("Processing object with title '%s'", obj_title)
additionalProps = obj.get("additionalProperties")
props = obj.get("properties")
if additionalProps and not props:
# not "really" an object, just a KV store
logger.debug("%s is a pseudo-object", obj_title)
key_type = additionalProps.get("x-pattern", "string")
res = process_data_type(additionalProps)
return {
"title": "{%s: %s}" % (key_type, res["title"]),
"tables": res["tables"],
}
if not props:
props = obj.get("patternProperties")
if props:
# try to replace horrible regex key names with pretty x-pattern ones
for key_name in props.keys():
pretty_key = props[key_name].get("x-pattern")
if pretty_key:
props[pretty_key] = props[key_name]
del props[key_name]
# Sometimes you just want to specify that a thing is an object without
# doing all the keys.
if not props:
return {
"title": obj_title if obj_title else 'object',
"tables": [],
}
if enforce_title and not obj_title:
        # Force a default title of "NO_TITLE" to make it obvious in the
# specification output which parts of the schema are missing a title
obj_title = 'NO_TITLE'
required_keys = set(obj.get("required", []))
first_table_rows = []
tables = []
for key_name in props:
try:
logger.debug("Processing property %s.%s", obj_title, key_name)
required = key_name in required_keys
res = process_data_type(props[key_name], required)
first_table_rows.append(TypeTableRow(
key=key_name,
title=res["title"],
required=required,
desc=res["desc"],
))
tables.extend(res["tables"])
logger.debug("Done property %s" % key_name)
except Exception as e:
e2 = Exception("Error reading property %s.%s: %s" %
(obj_title, key_name, str(e)))
# throw the new exception with the old stack trace, so that
# we don't lose information about where the error occurred.
raise e2.with_traceback(sys.exc_info()[2])
tables.insert(0, TypeTable(title=obj_title, rows=first_table_rows))
for table in tables:
assert isinstance(table, TypeTable)
return {
"title": obj_title,
"tables": tables,
}
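# Illustrative example (not executed): for a resolved schema such as
#     {"type": "object", "title": "Foo", "required": ["bar"],
#      "properties": {"bar": {"type": "string", "description": "A bar."}}}
# this returns {"title": "Foo", "tables": [TypeTable(title="Foo", rows=[
#     TypeTableRow(key="bar", title="string",
#                  desc="**Required.** A bar.", required=True)])]}.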
# process a data type definition. returns a dictionary with the keys:
# title: stringified type name
# desc: description
# enum_desc: description of permissible enum fields
# is_object: true if the data type is an object
# tables: list of additional table definitions
def process_data_type(prop, required=False, enforce_title=True):
prop = inherit_parents(prop)
prop_type = prop.get('oneOf', prop.get('type', []))
assert prop_type
tables = []
enum_desc = None
is_object = False
if prop_type == "object":
res = get_json_schema_object_fields(
prop,
enforce_title=enforce_title,
)
prop_title = res["title"]
tables = res["tables"]
is_object = True
elif prop_type == "array":
items = prop["items"]
# Items can be a list of schemas or a schema itself
# http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.4
if isinstance(items, list):
nested_titles = []
for i in items:
nested = process_data_type(i)
tables.extend(nested['tables'])
nested_titles.append(nested['title'])
prop_title = "[%s]" % (", ".join(nested_titles), )
else:
nested = process_data_type(prop["items"])
prop_title = "[%s]" % nested["title"]
tables = nested["tables"]
enum_desc = nested["enum_desc"]
elif isinstance(prop_type, list):
prop_title = []
for t in prop_type:
if isinstance(t, dict):
nested = process_data_type(t)
tables.extend(nested['tables'])
prop_title.append(nested['title'])
# Assuming there's at most one enum among type options
enum_desc = nested['enum_desc']
if enum_desc:
enum_desc = "%s if the type is enum" % enum_desc
else:
prop_title.append(t)
else:
prop_title = prop_type
if prop.get("enum"):
if len(prop["enum"]) > 1:
prop_title = "enum"
enum_desc = (
"One of: %s" % json.dumps(prop["enum"])
)
else:
enum_desc = (
"Must be '%s'." % prop["enum"][0]
)
if isinstance(prop_title, list):
prop_title = " or ".join(prop_title)
rq = "**Required.**" if required else None
desc = " ".join(x for x in [rq, prop.get("description"), enum_desc] if x)
for table in tables:
assert isinstance(table, TypeTable)
return {
"title": prop_title,
"desc": desc,
"enum_desc": enum_desc,
"is_object": is_object,
"tables": tables,
}
def deduplicate_tables(tables):
# the result may contain duplicates, if objects are referred to more than
# once. Filter them out.
#
# Go through the tables backwards so that we end up with a breadth-first
# rather than depth-first ordering.
titles = set()
filtered = []
for table in reversed(tables):
if table.title in titles:
continue
titles.add(table.title)
filtered.append(table)
filtered.reverse()
return filtered
def get_tables_for_schema(schema):
pv = process_data_type(schema, enforce_title=False)
return deduplicate_tables(pv["tables"])
def get_tables_for_response(schema):
pv = process_data_type(schema, enforce_title=False)
tables = deduplicate_tables(pv["tables"])
# make up the first table, with just the 'body' row in, unless the response
# is an object, in which case there's little point in having one.
if not pv["is_object"]:
first_table_row = TypeTableRow(
key="<body>", title=pv["title"], desc=pv["desc"],
)
tables.insert(0, TypeTable(None, rows=[first_table_row]))
logger.debug("response: %r" % tables)
return tables
def get_example_for_schema(schema):
"""Returns a python object representing a suitable example for this object"""
schema = inherit_parents(schema)
if 'example' in schema:
example = schema['example']
return example
proptype = schema['type']
if proptype == 'object':
if 'properties' not in schema:
raise Exception('"object" property has neither properties nor example')
res = OrderedDict()
for prop_name, prop in schema['properties'].items():
logger.debug("Parsing property %r" % prop_name)
prop_example = get_example_for_schema(prop)
res[prop_name] = prop_example
return res
if proptype == 'array':
if 'items' not in schema:
raise Exception('"array" property has neither items nor example')
items = schema['items']
if isinstance(items, list):
return [get_example_for_schema(i) for i in items]
return [get_example_for_schema(items)]
if proptype == 'integer':
return 0
if proptype == 'string':
return proptype
raise Exception("Don't know to make an example %s" % proptype)
def get_example_for_param(param):
"""Returns a stringified example for a parameter"""
if 'x-example' in param:
return param['x-example']
schema = param.get('schema')
if not schema:
return None
exampleobj = None
if 'example' in schema:
exampleobj = schema['example']
if exampleobj is None:
exampleobj = get_example_for_schema(schema)
return json.dumps(exampleobj, indent=2)
def get_example_for_response(response):
"""Returns a stringified example for a response"""
exampleobj = None
if 'examples' in response:
exampleobj = response["examples"].get("application/json")
if exampleobj is None:
schema = response.get('schema')
if schema:
if schema['type'] == 'file':
# no example for 'file' responses
return None
exampleobj = get_example_for_schema(schema)
if exampleobj is None:
return None
return json.dumps(exampleobj, indent=2)
class MatrixUnits(Units):
def _load_swagger_meta(self, api, group_name):
endpoints = []
base_path = api.get("basePath", "")
for path in api["paths"]:
for method in api["paths"][path]:
logger.info(" ------- Endpoint: %s %s ------- " % (method, path))
try:
endpoint = self._handle_endpoint(
api["paths"][path][method], method,
base_path.rstrip("/") + path)
endpoints.append(endpoint)
except Exception as e:
logger.error("Error handling endpoint %s %s: %s",
method, path, e)
raise
return {
"base": api.get("basePath").rstrip("/"),
"group": group_name,
"endpoints": endpoints,
}
def _handle_endpoint(self, endpoint_swagger, method, path):
endpoint = {
"title": endpoint_swagger.get("summary", ""),
"deprecated": endpoint_swagger.get("deprecated", False),
"desc": endpoint_swagger.get("description",
endpoint_swagger.get("summary", "")),
"method": method.upper(),
"path": path.strip(),
"requires_auth": "security" in endpoint_swagger,
"rate_limited": 429 in endpoint_swagger.get("responses", {}),
"req_param_by_loc": {},
"req_body_tables": [],
"res_headers": None,
"res_tables": [],
"responses": [],
"example": {
"req": "",
}
}
path_template = path
example_query_params = []
example_body = ""
example_mime = "application/json"
for param in endpoint_swagger.get("parameters", []):
# even body params should have names, otherwise the active docs don't work.
param_name = param["name"]
try:
param_loc = param["in"]
if param_loc == "body":
self._handle_body_param(param, endpoint)
example_body = get_example_for_param(param)
continue
if param_loc == "header":
if param["name"] == "Content-Type" and param["x-example"]:
example_mime = param["x-example"]
# description
desc = param.get("description", "")
if param.get("required"):
desc = "**Required.** " + desc
# assign value expected for this param
val_type = param.get("type") # integer/string
if val_type == "array":
items = param.get("items")
if items:
if isinstance(items, list):
types = ", ".join(i.get("type") for i in items)
val_type = "[%s]" % (types,)
else:
val_type = "[%s]" % items.get("type")
if param.get("enum"):
val_type = "enum"
desc += (
" One of: %s" % json.dumps(param.get("enum"))
)
endpoint["req_param_by_loc"].setdefault(param_loc, []).append(
TypeTableRow(key=param_name, title=val_type, desc=desc),
)
example = get_example_for_param(param)
if example is None:
continue
if param_loc == "path":
path_template = path_template.replace(
"{%s}" % param_name, quote(example)
)
elif param_loc == "query":
if type(example) == list:
for value in example:
example_query_params.append((param_name, value))
else:
example_query_params.append((param_name, example))
except Exception as e:
raise Exception("Error handling parameter %s" % param_name, e)
# endfor[param]
good_response = None
for code in sorted(endpoint_swagger.get("responses", {}).keys()):
res = endpoint_swagger["responses"][code]
if not good_response and code == 200:
good_response = res
description = res.get("description", "")
example = get_example_for_response(res)
endpoint["responses"].append({
"code": code,
"description": description,
"example": example,
})
# add response params if this API has any.
if good_response:
if "schema" in good_response:
endpoint["res_tables"] = get_tables_for_response(
good_response["schema"]
)
if "headers" in good_response:
headers = TypeTable()
for (header_name, header) in good_response["headers"].items():
headers.add_row(
TypeTableRow(key=header_name, title=header["type"],
desc=header["description"]),
)
endpoint["res_headers"] = headers
query_string = "" if len(
example_query_params) == 0 else "?" + urlencode(
example_query_params)
if example_body:
endpoint["example"][
"req"] = "%s %s%s HTTP/1.1\nContent-Type: %s\n\n%s" % (
method.upper(), path_template, query_string, example_mime, example_body
)
else:
endpoint["example"]["req"] = "%s %s%s HTTP/1.1\n\n" % (
method.upper(), path_template, query_string
)
return endpoint
def _handle_body_param(self, param, endpoint_data):
"""Update endpoint_data object with the details of the body param
:param dict param the parameter data from the yaml
:param dict endpoint_data dictionary of endpoint data to be updated
"""
try:
schema = inherit_parents(param["schema"])
if schema["type"] != "object":
                logger.warning(
"Unsupported body type %s for %s %s", schema["type"],
endpoint_data["method"], endpoint_data["path"]
)
return
req_body_tables = get_tables_for_schema(schema)
if req_body_tables == []:
# no fields defined for the body.
return
# put the top-level parameters into 'req_param_by_loc', and the others
# into 'req_body_tables'
body_params = endpoint_data['req_param_by_loc'].setdefault("JSON body",[])
body_params.extend(req_body_tables[0].rows)
body_tables = req_body_tables[1:]
endpoint_data['req_body_tables'].extend(body_tables)
except Exception as e:
e2 = Exception(
"Error decoding body of API endpoint %s %s: %s" %
(endpoint_data["method"], endpoint_data["path"], e)
)
raise e2.with_traceback(sys.exc_info()[2])
def load_swagger_apis(self):
apis = {}
for path, suffix in HTTP_APIS.items():
for filename in os.listdir(path):
if not filename.endswith(".yaml"):
continue
filepath = os.path.join(path, filename)
logger.info("Reading swagger API: %s" % filepath)
with open(filepath, "r", encoding="utf-8") as f:
# strip .yaml
group_name = filename[:-5].replace("-", "_")
group_name = "%s_%s" % (group_name, suffix)
api = yaml.load(f, OrderedLoader)
api = resolve_references(filepath, api)
api["__meta"] = self._load_swagger_meta(
api, group_name
)
apis[group_name] = api
return apis
def load_swagger_definitions(self):
defs = {}
for path, prefix in SWAGGER_DEFINITIONS.items():
self._load_swagger_definitions_in_dir(defs, path, prefix)
return defs
def _load_swagger_definitions_in_dir(self, defs, path, prefix, recurse=True):
if not os.path.exists(path):
return defs
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
if os.path.isdir(filepath) and recurse:
safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", filename)
dir_prefix = "_".join([prefix, safe_name])
# We don't recurse because we have to stop at some point
self._load_swagger_definitions_in_dir(
defs, filepath, dir_prefix, recurse=False)
if not filename.endswith(".yaml"):
continue
filepath = os.path.join(path, filename)
logger.info("Reading swagger definition: %s" % filepath)
with open(filepath, "r", encoding="utf-8") as f:
# strip .yaml
group_name = re.sub(r"[^a-zA-Z0-9_]", "_", filename[:-5])
group_name = "%s_%s" % (prefix, group_name)
definition = yaml.load(f, OrderedLoader)
definition = resolve_references(filepath, definition)
if 'type' not in definition:
continue
try:
example = get_example_for_schema(definition)
                except Exception:
                    # examples are optional for definitions; ignore any failure
                    example = None
if 'title' not in definition:
definition['title'] = "NO_TITLE"
definition['tables'] = get_tables_for_schema(definition)
defs[group_name] = {
"definition": definition,
"examples": [example] if example is not None else [],
}
return defs
def load_common_event_fields(self):
"""Parse the core event schema files
Returns:
dict: with the following properties:
"title": Event title (from the 'title' field of the schema)
"desc": desc
"tables": list[TypeTable]
"""
path = CORE_EVENT_SCHEMA
event_types = {}
for filename in os.listdir(path):
if not filename.endswith(".yaml"):
continue
filepath = os.path.join(path, filename)
event_type = filename[:-5] # strip the ".yaml"
logger.info("Reading event schema: %s" % filepath)
with open(filepath, encoding="utf-8") as f:
event_schema = yaml.load(f, OrderedLoader)
event_schema = resolve_references(filepath, event_schema)
schema_info = process_data_type(
event_schema,
enforce_title=True,
)
event_types[event_type] = schema_info
return event_types
def load_apis(self, substitutions):
cs_ver = substitutions.get("%CLIENT_RELEASE_LABEL%", "unstable")
fed_ver = substitutions.get("%SERVER_RELEASE_LABEL%", "unstable")
is_ver = substitutions.get("%IDENTITY_RELEASE_LABEL%", "unstable")
as_ver = substitutions.get("%APPSERVICE_RELEASE_LABEL%", "unstable")
push_gw_ver = substitutions.get("%PUSH_GATEWAY_RELEASE_LABEL%", "unstable")
# we abuse the typetable to return this info to the templates
return TypeTable(rows=[
TypeTableRow(
"`Client-Server API <client_server/"+cs_ver+".html>`_",
cs_ver,
"Interaction between clients and servers",
), TypeTableRow(
"`Server-Server API <server_server/"+fed_ver+".html>`_",
fed_ver,
"Federation between servers",
), TypeTableRow(
"`Application Service API <application_service/"+as_ver+".html>`_",
as_ver,
"Privileged server plugins",
), TypeTableRow(
"`Identity Service API <identity_service/"+is_ver+".html>`_",
is_ver,
"Mapping of third party IDs to Matrix IDs",
), TypeTableRow(
"`Push Gateway API <push_gateway/"+push_gw_ver+".html>`_",
push_gw_ver,
"Push notifications for Matrix events",
),
])
def load_event_examples(self):
path = EVENT_EXAMPLES
examples = {}
for filename in os.listdir(path):
if not filename.startswith("m."):
continue
event_name = filename.split("$")[0]
filepath = os.path.join(path, filename)
logger.info("Reading event example: %s" % filepath)
try:
with open(filepath, "r", encoding="utf-8") as f:
example = resolve_references(filepath, json.load(f))
examples[filename] = examples.get(filename, [])
examples[filename].append(example)
if filename != event_name:
examples[event_name] = examples.get(event_name, [])
examples[event_name].append(example)
except Exception as e:
e2 = Exception("Error reading event example "+filepath+": "+
str(e))
# throw the new exception with the old stack trace, so that
# we don't lose information about where the error occurred.
raise e2.with_traceback(sys.exc_info()[2])
return examples
def load_event_schemas(self):
path = EVENT_SCHEMA
schemata = {}
for filename in os.listdir(path):
if not filename.startswith("m."):
continue
filepath = os.path.join(path, filename)
try:
schemata[filename] = self.read_event_schema(filepath)
except Exception as e:
e2 = Exception("Error reading event schema "+filepath+": "+
str(e))
# throw the new exception with the old stack trace, so that
# we don't lose information about where the error occurred.
raise e2.with_traceback(sys.exc_info()[2])
return schemata
def read_event_schema(self, filepath):
logger.info("Reading %s" % filepath)
with open(filepath, "r", encoding="utf-8") as f:
json_schema = yaml.load(f, OrderedLoader)
schema = {
# one of "Message Event" or "State Event"
"typeof": "",
"typeof_info": "",
# event type, eg "m.room.member". Note *not* the type of the
# event object (which should always be 'object').
"type": None,
"title": None,
"desc": None,
"msgtype": None,
"type_with_msgtype": None, # for the template's sake
"content_fields": [
# <TypeTable>
]
}
# before we resolve the references, see if the first reference is to
# the message event or state event schemas, and add typeof info if so.
base_defs = {
ROOM_EVENT: "Message Event",
STATE_EVENT: "State Event"
}
if type(json_schema.get("allOf")) == list:
firstRef = json_schema["allOf"][0]["$ref"]
if firstRef in base_defs:
schema["typeof"] = base_defs[firstRef]
json_schema = resolve_references(filepath, json_schema)
# add type
schema["type"] = Units.prop(
json_schema, "properties/type/enum"
)[0]
# add summary and desc
schema["title"] = json_schema.get("title")
schema["desc"] = json_schema.get("description", "")
# walk the object for field info
schema["content_fields"] = get_tables_for_schema(
Units.prop(json_schema, "properties/content")
)
# grab msgtype if it is the right kind of event
msgtype = Units.prop(
json_schema, "properties/content/properties/msgtype/enum"
)
if msgtype:
schema["msgtype"] = msgtype[0] # enum prop
schema["type_with_msgtype"] = schema["type"] + " (" + msgtype[0] + ")"
# link to msgtypes for m.room.message
if schema["type"] == "m.room.message" and not msgtype:
schema["desc"] += (
" For more information on ``msgtypes``, see "+
"`m.room.message msgtypes`_."
)
# method types for m.key.verification.start
if schema["type"] == "m.key.verification.start":
methods = Units.prop(
json_schema, "properties/content/properties/method/enum"
)
if methods:
schema["type_with_msgtype"] = schema["type"] + " (" + methods[0] + ")"
# Assign state key info if it has some
if schema["typeof"] == "State Event":
skey_desc = Units.prop(
json_schema, "properties/state_key/description"
)
if not skey_desc:
raise Exception("Missing description for state_key")
schema["typeof_info"] = "``state_key``: %s" % skey_desc
return schema
def load_changelogs(self, substitutions):
"""Loads the changelog unit for later rendering in a section.
Args:
substitutions: dict of variable name to value. Provided by the gendoc script.
Returns:
A dict of API name ("client_server", for example) to changelog.
"""
changelogs = {}
# The APIs and versions we'll prepare changelogs for. We use the substitutions
# to ensure that we pick up the right version for generated documentation. This
# defaults to "unstable" as a version for incremental generated documentation (CI).
prepare_versions = {
"server_server": substitutions.get("%SERVER_RELEASE_LABEL%", "unstable"),
"client_server": substitutions.get("%CLIENT_RELEASE_LABEL%", "unstable"),
"identity_service": substitutions.get("%IDENTITY_RELEASE_LABEL%", "unstable"),
"push_gateway": substitutions.get("%PUSH_GATEWAY_RELEASE_LABEL%", "unstable"),
"application_service": substitutions.get("%APPSERVICE_RELEASE_LABEL%", "unstable"),
}
# Changelogs are split into two places: towncrier for the unstable changelog and
# the RST file for historical versions. If the prepare_versions dict above has
# a version other than "unstable" specified for an API, we'll use the historical
# changelog and otherwise generate the towncrier log in-memory.
for api_name, target_version in prepare_versions.items():
logger.info("Generating changelog for %s at %s" % (api_name, target_version,))
changelog_lines = []
if target_version == 'unstable':
# generate towncrier log
changelog_lines = self._read_towncrier_changelog(api_name)
else:
# read in the existing RST changelog
changelog_lines = self._read_rst_changelog(api_name)
# Parse the changelog lines to find the header we're looking for and therefore
# the changelog body.
prev_line = None
title_part = None
changelog_body_lines = []
for line in changelog_lines:
if prev_line is None:
prev_line = line
continue
if re.match("^[=]{3,}$", line.strip()):
# the last line was a header - use that as our new title_part
title_part = prev_line.strip()
# take off the last line from the changelog_body_lines because it's the title
if len(changelog_body_lines) > 0:
changelog_body_lines = changelog_body_lines[:len(changelog_body_lines) - 1]
continue
if re.match("^[-]{3,}$", line.strip()):
# the last line is a subheading - drop this line because it's the underline
# and that causes problems with rendering. We'll keep the header text though.
continue
if line.strip().startswith(".. "):
# skip comments
continue
if title_part == target_version:
# if we made it this far, append the line to the changelog body. We indent it so
# that it renders correctly in the section. We also add newlines so that there's
# intentionally blank lines that make rst2html happy.
changelog_body_lines.append(" " + line + '\n')
prev_line = line
if len(changelog_body_lines) > 0:
changelogs[api_name] = "".join(changelog_body_lines)
else:
raise ValueError("No changelog for %s at %s" % (api_name, target_version,))
# return our `dict[api_name] => changelog` as the last step.
return changelogs
def _read_towncrier_changelog(self, api_name):
tc_path = os.path.join(CHANGELOG_DIR, api_name)
if os.path.isdir(tc_path):
logger.info("Generating towncrier changelog for: %s" % api_name)
p = subprocess.Popen(
['towncrier', '--version', 'unstable', '--name', api_name, '--draft'],
cwd=tc_path,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if p.returncode != 0:
# Something broke - dump as much information as we can
logger.error("Towncrier exited with code %s" % p.returncode)
logger.error(stdout.decode('UTF-8'))
logger.error(stderr.decode('UTF-8'))
raw_log = ""
else:
raw_log = stdout.decode('UTF-8')
# This is a bit of a hack, but it does mean that the log at least gets *something*
# to tell us it broke
if not raw_log.startswith("unstable"):
logger.error("Towncrier appears to have failed to generate a changelog")
logger.error(raw_log)
raw_log = ""
return raw_log.splitlines()
return []
def _read_rst_changelog(self, api_name):
logger.info("Reading changelog RST for %s" % api_name)
rst_path = os.path.join(CHANGELOG_DIR, "%s.rst" % api_name)
with open(rst_path, 'r', encoding="utf-8") as f:
return f.readlines()
def load_unstable_warnings(self, substitutions):
warning = """
.. WARNING::
You are viewing an unstable version of this specification. Unstable
specifications may change at any time without notice. To view the
current specification, please `click here <latest.html>`_.
"""
warnings = {}
for var in substitutions.keys():
key = var[1:-1] # take off the surrounding %-signs
if substitutions.get(var, "unstable") == "unstable":
warnings[key] = warning
else:
warnings[key] = ""
return warnings
def load_spec_targets(self):
with open(TARGETS, "r") as f:
return yaml.load(f.read())
def load_git_version(self):
null = open(os.devnull, 'w')
cwd = os.path.dirname(os.path.abspath(__file__))
try:
git_branch = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
stderr=null,
cwd=cwd,
).strip().decode('UTF-8')
except subprocess.CalledProcessError:
git_branch = ""
try:
git_tag = subprocess.check_output(
['git', 'describe', '--exact-match'],
stderr=null,
cwd=cwd,
).strip().decode('UTF-8')
git_tag = "tag=" + git_tag
except subprocess.CalledProcessError:
git_tag = ""
try:
git_commit = subprocess.check_output(
['git', 'rev-parse', '--short', 'HEAD'],
stderr=null,
cwd=cwd,
).strip().decode('UTF-8')
except subprocess.CalledProcessError:
git_commit = ""
try:
dirty_string = "-this_is_a_dirty_checkout"
is_dirty = subprocess.check_output(
['git', 'describe', '--dirty=' + dirty_string, "--all"],
stderr=null,
cwd=cwd,
).strip().decode('UTF-8').endswith(dirty_string)
git_dirty = "dirty" if is_dirty else ""
except subprocess.CalledProcessError:
git_dirty = ""
git_version = "Unknown"
if git_branch or git_tag or git_commit or git_dirty:
git_version = ",".join(
s for s in
(git_branch, git_tag, git_commit, git_dirty,)
if s
).encode("ascii").decode('ascii')
return {
"string": git_version,
"revision": git_commit
}
def load_sas_emoji(self):
with open(SAS_EMOJI_JSON, 'r', encoding='utf-8') as sas_json:
emoji = json.load(sas_json)
# Verify the emoji matches the unicode
for c in emoji:
e = c['emoji']
logger.info("Checking emoji %s (%s)", e, c['description'])
u = re.sub(r'U\+([0-9a-fA-F]+)', lambda m: chr(int(m.group(1), 16)), c['unicode'])
if e != u:
raise Exception("Emoji %s should be %s not %s" % (
c['description'],
repr(e),
c['unicode'],
))
return emoji
|
the-stack_106_29219 | """Spark helper functions"""
import csv
import logging
import os
from pathlib import Path
from typing import AnyStr, List, Union
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import StructType, StringType
from pyspark import SparkConf
from dsgrid.exceptions import DSGInvalidField
from dsgrid.utils.files import load_data
from dsgrid.utils.timing import Timer, track_timing, timer_stats_collector
logger = logging.getLogger(__name__)
def init_spark(name="dsgrid"):
"""Initialize a SparkSession."""
cluster = os.environ.get("SPARK_CLUSTER")
if cluster is not None:
logger.info("Create SparkSession %s on existing cluster %s", name, cluster)
conf = SparkConf().setAppName(name).setMaster(cluster)
spark = SparkSession.builder.config(conf=conf).getOrCreate()
else:
logger.info("Create SparkSession %s in local-mode cluster", name)
spark = SparkSession.builder.master("local").appName(name).getOrCreate()
logger.info("Spark conf: %s", str(spark.sparkContext.getConf().getAll()))
return spark
@track_timing(timer_stats_collector)
def create_dataframe(records, cache=False, require_unique=None):
"""Create a spark DataFrame from a list of records.
Parameters
----------
records : list
list of spark.sql.Row
cache : bool
If True, cache the DataFrame in memory.
require_unique : list
list of column names (str) to check for uniqueness
Returns
-------
spark.sql.DataFrame
"""
df = SparkSession.getActiveSession().createDataFrame(records)
_post_process_dataframe(df, cache=cache, require_unique=require_unique)
return df
@track_timing(timer_stats_collector)
def read_dataframe(filename, cache=False, require_unique=None, read_with_spark=True):
"""Create a spark DataFrame from a file.
Supported formats when read_with_spark=True: .csv, .json, .parquet
Supported formats when read_with_spark=False: .csv, .json
Parameters
----------
filename : str | Path
path to file
cache : bool
If True, cache the DataFrame in memory.
require_unique : list
list of column names (str) to check for uniqueness
read_with_spark : bool
If True, read the file with pyspark.read. Otherwise, read the file into
a list of dicts, convert to pyspark Rows, and then to a DataFrame.
Returns
-------
spark.sql.DataFrame
Raises
------
ValueError
Raised if a require_unique column has duplicate values.
"""
filename = Path(filename)
func = _read_with_spark if read_with_spark else _read_natively
df = func(filename)
_post_process_dataframe(df, cache=cache, require_unique=require_unique)
return df
def _read_with_spark(filename):
spark = SparkSession.getActiveSession()
path = str(filename)
if filename.suffix == ".csv":
df = spark.read.csv(path, inferSchema=True, header=True)
elif Path(filename).suffix == ".parquet":
df = spark.read.parquet(path)
elif Path(filename).suffix == ".json":
df = spark.read.json(path, mode="FAILFAST")
else:
assert False, f"Unsupported file extension: {filename}"
return df
def _read_natively(filename):
if filename.suffix == ".csv":
with open(filename, encoding="utf-8-sig") as f_in:
rows = [Row(**x) for x in csv.DictReader(f_in)]
elif Path(filename).suffix == ".json":
rows = load_data(filename)
else:
assert False, f"Unsupported file extension: {filename}"
return SparkSession.getActiveSession().createDataFrame(rows)
def _post_process_dataframe(df, cache=False, require_unique=None):
# TODO This is causing Spark warning messages. Disable until we know why.
# if cache:
# df = df.cache()
if require_unique is not None:
with Timer(timer_stats_collector, "check_unique"):
for column in require_unique:
unique = df.select(column).distinct()
if unique.count() != df.count():
raise DSGInvalidField(f"DataFrame has duplicate entries for {column}")
def get_unique_values(df, columns: Union[AnyStr, List]):
"""Return the unique values of a dataframe in one column or a list of columns.
Parameters
----------
df : pyspark.sql.DataFrame
column : str or list of str
Returns
-------
set
"""
dfc = df.select(columns).distinct().collect()
if isinstance(columns, list):
values = {tuple(getattr(row, col) for col in columns) for row in dfc}
else:
values = {getattr(x, columns) for x in dfc}
return values
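# Example sketch (the column names below are hypothetical):
#     get_unique_values(df, "county")            -> {"adams", "boulder", ...}
#     get_unique_values(df, ["county", "state"]) -> {("adams", "CO"), ...}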
@track_timing(timer_stats_collector)
def models_to_dataframe(models, cache=False):
"""Converts a list of Pydantic models to a Spark DataFrame.
Parameters
----------
models : list
cache : If True, cache the DataFrame.
Returns
-------
pyspark.sql.DataFrame
"""
assert models
cls = type(models[0])
rows = []
for model in models:
row = Row(**{f: getattr(model, f) for f in cls.__fields__})
rows.append(row)
df = SparkSession.getActiveSession().createDataFrame(rows)
if cache:
df.cache()
return df
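# Example sketch (assumes a hypothetical Pydantic model `Record` with fields
# `id` and `name`):
#     df = models_to_dataframe([Record(id="d1", name="demand")])
#     df.columns  -> ['id', 'name']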
@track_timing(timer_stats_collector)
def create_dataframe_from_dimension_ids(records, *dimension_types, cache=True):
"""Return a DataFrame created from the IDs of dimension_types.
Parameters
----------
records : sequence
Iterable of lists of record IDs
dimension_types : tuple
cache : If True, cache the DataFrame.
Returns
-------
pyspark.sql.DataFrame
"""
schema = StructType()
for dimension_type in dimension_types:
schema.add(dimension_type.value, StringType(), nullable=False)
df = SparkSession.getActiveSession().createDataFrame(records, schema=schema)
if cache:
df.cache()
return df
@track_timing(timer_stats_collector)
def check_for_nulls(df, exclude_columns=None):
"""Check if a DataFrame has null values.
Parameters
----------
df : spark.sql.DataFrame
exclude_columns : None or Set
Raises
------
DSGInvalidField
Raised if null exists in any column.
"""
if exclude_columns is None:
exclude_columns = set()
cols_to_check = set(df.columns).difference(exclude_columns)
cols_str = ", ".join(cols_to_check)
filter_str = " OR ".join((f"{x} is NULL" for x in cols_to_check))
df.createOrReplaceTempView("tmp_table")
try:
# Avoid iterating with many checks unless we know there is at least one failure.
nulls = sql(f"SELECT {cols_str} FROM tmp_table WHERE {filter_str}")
if not nulls.rdd.isEmpty():
cols_with_null = set()
for col in cols_to_check:
if not nulls.select(col).filter(f"{col} is NULL").rdd.isEmpty():
cols_with_null.add(col)
assert cols_with_null, "Did not find any columns with NULL values"
raise DSGInvalidField(
f"DataFrame contains NULL value(s) for column(s): {cols_with_null}"
)
finally:
sql("DROP VIEW tmp_table")
def sql(query):
"""Run a SQL query with Spark.
Parameters
----------
query : str
Returns
-------
pyspark.sql.DataFrame
"""
logger.debug("Run SQL query [%s]", query)
return SparkSession.getActiveSession().sql(query)
def sql_from_sqlalchemy(query):
"""Run a SQL query with Spark where the query was generated by sqlalchemy.
Parameters
----------
query : sqlalchemy.orm.query.Query
Returns
-------
pyspark.sql.DataFrame
"""
logger.debug("sqlchemy query = %s", query)
return sql(str(query).replace('"', ""))
|
the-stack_106_29220 | """meltano run command and supporting functions."""
from typing import List, Union
import click
import structlog
from meltano.core.block.blockset import BlockSet
from meltano.core.block.parser import BlockParser, validate_block_sets
from meltano.core.block.plugin_command import PluginCommandBlock
from meltano.core.project import Project
from meltano.core.runner import RunnerError
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.utils import click_run_async
from . import CliError, cli
from .params import pass_project
logger = structlog.getLogger(__name__)
@cli.command(short_help="[preview] Run a set of plugins in series.")
@click.option(
"--full-refresh",
help="Perform a full refresh (ignore state left behind by any previous runs). Applies to all pipelines.",
is_flag=True,
)
@click.option(
"--no-state-update",
help="Run without state saving. Applies to all pipelines.",
is_flag=True,
)
@click.option(
"--force",
"-f",
help="Force a new run even if a pipeline with the same Job ID is already present. Applies to all pipelines.",
is_flag=True,
)
@click.argument(
"blocks",
nargs=-1,
)
@pass_project(migrate=True)
@click_run_async
async def run(
project: Project,
full_refresh: bool,
no_state_update: bool,
force: bool,
blocks: List[str],
):
"""
Run a set of command blocks in series.
Blocks are specified as a list of plugin names, e.g.
`meltano run some_extractor some_loader some_plugin:some_command` and are run in the order they are specified
from left to right. A failure in any block will cause the entire run to abort.
    Multiple command blocks can be chained together or repeated, and tap/target pairs will automatically be linked:
`meltano run tap-gitlab target-postgres dbt:test dbt:run`\n
`meltano run tap-gitlab target-postgres tap-salesforce target-mysql ...`\n
`meltano run tap-gitlab target-postgres dbt:run tap-postgres target-bigquery ...`\n
    When running within an active environment, meltano run activates incremental job support. Job IDs are autogenerated
using the format `{active_environment.name}:{extractor_name}-to-{loader_name}` for each extract/load pair found:
`meltano --environment=prod run tap-gitlab target-postgres tap-salesforce target-mysql`\n
The above command will create two jobs with the IDs `prod:tap-gitlab-to-target-postgres` and `prod:tap-salesforce-to-target-mysql`.
    This is a preview feature - its functionality and CLI signature are still evolving.
\b\nRead more at https://meltano.com/docs/command-line-interface.html#run
"""
parser = BlockParser(logger, project, blocks, full_refresh, no_state_update, force)
parsed_blocks = list(parser.find_blocks(0))
if not parsed_blocks:
logger.info("No valid blocks found.")
return
if validate_block_sets(logger, parsed_blocks):
logger.debug("All ExtractLoadBlocks validated, starting execution.")
else:
raise CliError("Some ExtractLoadBlocks set failed validation.")
await _run_blocks(parsed_blocks)
tracker = GoogleAnalyticsTracker(project)
tracker.track_meltano_run(blocks)
async def _run_blocks(parsed_blocks: List[Union[BlockSet, PluginCommandBlock]]) -> None:
for idx, blk in enumerate(parsed_blocks):
try:
await blk.run()
except RunnerError as err:
logger.error(
"Block run completed.",
set_number=idx,
block_type=blk.__class__.__name__,
success=False,
err=err,
exit_codes=err.exitcodes,
)
raise CliError(
f"Run invocation could not be completed as block failed: {err}"
) from err
logger.info(
"Block run completed.",
set_number=idx,
block_type=blk.__class__.__name__,
success=True,
err=None,
)
|
the-stack_106_29221 | """
*************************************************************************
* Copyright 2020 Adobe. All rights reserved.
* This file is licensed to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
**************************************************************************/
"""
from converter.aem_dispatcher_converter import AEMDispatcherConverter
from util import constants
from argparse import ArgumentParser
from shutil import copytree, rmtree
from os.path import exists
parser = ArgumentParser()
parser.add_argument('--sdk_src', help='Absolute path to the src folder of the dispatcher sdk')
parser.add_argument('--cfg', help='Absolute path to dispatcher config folder')
args = parser.parse_args()
# if `target` folder already exists, delete it
if exists(constants.TARGET_FOLDER):
rmtree(constants.TARGET_FOLDER)
copytree(args.cfg, constants.TARGET_DISPATCHER_SRC_FOLDER, True)
converter = AEMDispatcherConverter(args.sdk_src, constants.TARGET_DISPATCHER_SRC_FOLDER)
converter.__transform__()
print("\nTransformation Complete!\n")
print("Please check", constants.TARGET_DISPATCHER_SRC_FOLDER, "folder for transformed configuration files.")
print("Please check", constants.SUMMARY_REPORT_FILE, "for summary report.")
print("Please check", constants.LOG_FILE, "for logs.")
|
the-stack_106_29224 | import os
import time
import pandas as pd
from tqdm import tqdm
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
from pm4py.algo.conformance.tokenreplay import algorithm as token_replay
from skmultiflow.utils import calculate_object_size
from utils import read_log
from utils import sort_alphanumeric
from utils import retrieve_traces
def compute_alignments(replayed_traces):
trace_is_fit, trace_fitness, missing_tokens, consumed_tokens, remaining_tokens, produced_tokens = [], [], [], [], [], []
for replayed in replayed_traces:
trace_is_fit.append(replayed['trace_is_fit'])
trace_fitness.append(float(replayed['trace_fitness']))
missing_tokens.append(float(replayed['missing_tokens']))
consumed_tokens.append(float(replayed['consumed_tokens']))
remaining_tokens.append(float(replayed['remaining_tokens']))
produced_tokens.append(float(replayed['produced_tokens']))
return [trace_is_fit, trace_fitness, missing_tokens, consumed_tokens, remaining_tokens, produced_tokens]
path = './event_logs'
save_path = './encoding_results/tokenreplay'
os.makedirs(save_path, exist_ok=True)
for file in tqdm(sort_alphanumeric(os.listdir(path))):
# read event log and import case id and labels
ids, traces, y = retrieve_traces(read_log(path, file))
# import xes log for process discovery
file_xes = file.split('.csv')[0]
log = xes_importer.apply(f'{path}_xes/{file_xes}.xes')
start_time = time.time()
# generate process model
net, initial_marking, final_marking = inductive_miner.apply(log)
# calculating tokenreplay
replayed_traces = token_replay.apply(log, net, initial_marking, final_marking)
end_time = time.time() - start_time
memory = calculate_object_size(replayed_traces)
final_token_replay = compute_alignments(replayed_traces)
# saving
out_df = pd.DataFrame()
out_df['trace_is_fit'] = final_token_replay[0]
out_df['trace_fitness'] = final_token_replay[1]
out_df['missing_tokens'] = final_token_replay[2]
out_df['consumed_tokens'] = final_token_replay[3]
out_df['remaining_tokens'] = final_token_replay[4]
out_df['produced_tokens'] = final_token_replay[5]
out_df['case'] = ids
out_df['time'] = end_time
out_df['memory'] = memory
out_df['label'] = y
out_df.to_csv(f'{save_path}/{file}', index=False)
|
the-stack_106_29225 | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.sets.vae_launcher import train_set_vae
if __name__ == "__main__":
variant = dict(
env_id='OneObject-PickAndPlace-BigBall-RandomInit-2D-v1',
renderer_kwargs=dict(
output_image_format='CHW',
),
create_vae_kwargs=dict(
latent_dim=128,
use_fancy_architecture=True,
decoder_distribution='gaussian_fixed_unit_variance',
),
vae_trainer_kwargs=dict(
vae_lr=1e-3,
vae_visualization_config=dict(
num_recons=10,
num_samples=20,
# debug_period=50,
debug_period=10,
unnormalize_images=True,
image_format='CHW',
),
beta=2,
set_loss_weight=0,
),
reward_visualization_period=10,
beta_scale_schedule_kwargs=dict(
version='piecewise_linear',
x_values=[0, 20, 40, 60, 80, 100],
),
data_loader_kwargs=dict(
batch_size=128,
),
vae_algo_kwargs=dict(
num_iters=101,
num_epochs_per_iter=20,
progress_csv_file_name='vae_progress.csv',
),
include_env_debug=True,
generate_test_set_kwargs=dict(
num_samples_per_set=128,
set_configs=[
dict(
version='move_a_to_b',
offsets_from_b=(4, 0),
a_axis_to_b_axis={
0: 2,
1: 3,
},
),
],
),
generate_train_set_kwargs=dict(
num_samples_per_set=128,
set_configs=[
dict(
version='move_a_to_b',
offsets_from_b=(4, 0),
a_axis_to_b_axis={
0: 2,
1: 3,
},
),
],
),
num_ungrouped_images=12800,
logger_config=dict(
push_prefix=False,
),
)
n_seeds = 1
mode = 'local'
exp_name = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 1
mode = 'sss'
exp_name = __file__.split('/')[-1].split('.')[0].replace('_', '-')
print('exp_name', exp_name)
search_space = {
'vae_algo_kwargs.num_iters': [101],
'create_vae_kwargs.decoder_distribution': [
'gaussian_fixed_unit_variance',
],
'create_vae_kwargs.use_fancy_architecture': [
True,
],
'vae_trainer_kwargs.set_loss_weight': [
0.,
1.,
2.,
5.,
10.,
],
'create_vae_kwargs.latent_dim': [
8,
],
'beta_scale_schedule_kwargs.y_values': [
[1, 1, 1, 1, 1, 1],
[0, 1, 5, 10, 10, 10],
[0, 10, 20, 30, 40, 50],
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = list(sweeper.iterate_hyperparameters())
for _ in range(n_seeds):
for exp_id, variant in enumerate(variants):
variant['vae_trainer_kwargs']['beta'] = (
1. / variant['create_vae_kwargs']['latent_dim']
)
variant['vae_trainer_kwargs']['debug_bad_recons'] = (
variant['create_vae_kwargs']['decoder_distribution'] ==
'gaussian_learned_global_scalar_variance'
)
if mode == 'local':
variant['vae_algo_kwargs']['num_iters'] = 1
variant['vae_algo_kwargs']['num_epochs_per_iter'] = 1
# variant['generate_train_set_kwargs']['saved_filename'] = (
# 'manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle'
# )
run_experiment(
train_set_vae,
exp_name=exp_name,
prepend_date_to_exp_name=True,
num_exps_per_instance=2,
mode=mode,
variant=variant,
# slurm_config_name='cpu',
use_gpu=True,
# gpu_id=1,
)
|
the-stack_106_29226 | import http.client
con_obj = http.client.HTTPSConnection("www.imdb.com")
con_obj.request("GET", "/")
response = con_obj.getresponse()
print("Status: {}".format(response.status))
read_data = response.read(1000)
print(read_data)
con_obj.close()
|
the-stack_106_29228 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
__all__ = ['WideResNet', 'resnet50']  # only the wide ResNet-50 builder is defined in this file
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class WideBottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(WideBottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(
planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class WideResNet(nn.Module):
def __init__(self,
block,
layers,
sample_height,
sample_width,
sample_duration,
k=1,
shortcut_type='B',
num_classes=400):
self.inplanes = 64
super(WideResNet, self).__init__()
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 64 * k, layers[0], shortcut_type)
self.layer2 = self._make_layer(
block, 128 * k, layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(
block, 256 * k, layers[2], shortcut_type, stride=2)
self.layer4 = self._make_layer(
block, 512 * k, layers[3], shortcut_type, stride=2)
last_duration = int(math.ceil(sample_duration / 16))
last_size_height = int(math.ceil(sample_height / 32))
last_size_width = int(math.ceil(sample_width / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size_height, last_size_width), stride=1)
self.fc = nn.Linear(512 * k * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def resnet50(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = WideResNet(WideBottleneck, [3, 4, 6, 3], **kwargs)
return model
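# Illustrative usage (hyperparameter values below are placeholders):
#     model = resnet50(sample_height=112, sample_width=112, sample_duration=16,
#                      k=2, num_classes=400)
#     params = get_fine_tuning_parameters(model, ft_begin_index=4)  # tune layer4 + fc
#     optimizer = torch.optim.SGD(params, lr=0.1, momentum=0.9)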
|
the-stack_106_29230 | # -*- coding: utf-8 -*-
import logging
from subprocess import CalledProcessError, check_call
from apps.offline.models import THUMBNAIL_HEIGHT
def create_thumbnail(instance):
logger = logging.getLogger(__name__)
logger.debug('Checking for thumbnail for "%s".' % instance.title)
if instance.thumbnail_exists is False:
logger.debug('Thumbnail for "%s" not found - creating...' % instance.title)
# Fixes an annoying Exception in logs, not really needed
# http://stackoverflow.com/questions/13193278/ {
import threading
threading._DummyThread._Thread__stop = lambda x: 42
# }
try:
check_call(["convert", "-resize", "x" + str(THUMBNAIL_HEIGHT), instance.url + "[0]", instance.thumbnail])
except (OSError, CalledProcessError) as e:
logger.debug("ERROR: {0}".format(e))
logger.debug('Thumbnail created, and is located at: %s' % instance.thumbnail)
else:
logger.debug('Thumbnail already exists, and is located at: %s' % instance.thumbnail)
|
the-stack_106_29234 | # pylint: disable=E1101
from warnings import catch_warnings
from datetime import datetime, timedelta
from functools import partial
from textwrap import dedent
from operator import methodcaller
import pytz
import pytest
import dateutil
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, Index, isna,
notna, Timestamp)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
from pandas.core.base import SpecificationError, AbstractMethodError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby import DataError
from pandas.tseries.frequencies import MONTHS, DAYS
from pandas.tseries.frequencies import to_offset
from pandas.core.indexes.datetimes import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.core.indexes.period import period_range, PeriodIndex, Period
from pandas.core.resample import (DatetimeIndex, TimeGrouper,
DatetimeIndexResampler)
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
from pandas._libs.period import IncompatibleFrequency
bday = BDay()
# The various methods we support
downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
'median', 'prod', 'var', 'ohlc']
upsample_methods = ['count', 'size']
series_methods = ['nunique']
resample_methods = downsample_methods + upsample_methods + series_methods
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResampleAPI(object):
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
self.frame = DataFrame(
{'A': self.series, 'B': self.series, 'C': np.arange(len(dti))})
def test_str(self):
r = self.series.resample('H')
assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '
'label=left, convention=start, base=0]' in str(r))
def test_api(self):
r = self.series.resample('H')
result = r.mean()
assert isinstance(result, Series)
assert len(result) == 217
r = self.series.to_frame().resample('H')
result = r.mean()
assert isinstance(result, DataFrame)
assert len(result) == 217
def test_api_changes_v018(self):
# change from .resample(....., how=...)
# to .resample(......).how()
r = self.series.resample('H')
assert isinstance(r, DatetimeIndexResampler)
for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how=how)
expected = getattr(self.series.resample('H'), how)()
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how='ohlc')
expected = self.series.resample('H').ohlc()
tm.assert_frame_equal(result, expected)
# compat for pandas-like methods
for how in ['sort_values', 'isna']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(r, how)()
# invalids as these can be setting operations
r = self.series.resample('H')
pytest.raises(ValueError, lambda: r.iloc[0])
pytest.raises(ValueError, lambda: r.iat[0])
pytest.raises(ValueError, lambda: r.loc[0])
pytest.raises(ValueError, lambda: r.loc[
Timestamp('2013-01-01 00:00:00', offset='H')])
pytest.raises(ValueError, lambda: r.at[
Timestamp('2013-01-01 00:00:00', offset='H')])
def f():
r[0] = 5
pytest.raises(ValueError, f)
# str/repr
r = self.series.resample('H')
with tm.assert_produces_warning(None):
str(r)
with tm.assert_produces_warning(None):
repr(r)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
# masquerade as Series/DataFrame as needed for API compat
assert isinstance(self.series.resample('H'), ABCSeries)
assert not isinstance(self.frame.resample('H'), ABCSeries)
assert not isinstance(self.series.resample('H'), ABCDataFrame)
assert isinstance(self.frame.resample('H'), ABCDataFrame)
# bin numeric ops
for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), pd.Series)
# unary numeric ops
for op in ['__pos__', '__neg__', '__abs__', '__inv__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(), pd.Series)
# comparison ops
for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']:
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), pd.Series)
# IPython introspection shouldn't trigger warning GH 13618
for op in ['_repr_json', '_repr_latex',
'_ipython_canary_method_should_not_exist_']:
r = self.series.resample('H')
with tm.assert_produces_warning(None):
getattr(r, op, None)
# getitem compat
df = self.series.to_frame('foo')
# same as prior versions for DataFrame
pytest.raises(KeyError, lambda: df.resample('H')[0])
# compat for Series
# but we cannot be sure that we need a warning here
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')[0]
expected = self.series.resample('H').mean()[0]
assert result == expected
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')['2005-01-09 23:00:00']
expected = self.series.resample('H').mean()['2005-01-09 23:00:00']
assert result == expected
def test_groupby_resample_api(self):
# GH 12448
# .groupby(...).resample(...) hitting warnings
# when appropriate
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4,
freq='W'),
'group': [1, 1, 2, 2],
'val': [5, 6, 7, 8]}).set_index('date')
# replication step
i = pd.date_range('2016-01-03', periods=8).tolist() + \
pd.date_range('2016-01-17', periods=8).tolist()
index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],
names=['group', 'date'])
expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},
index=index)
result = df.groupby('group').apply(
lambda x: x.resample('1D').ffill())[['val']]
assert_frame_equal(result, expected)
def test_groupby_resample_on_api(self):
# GH 15021
# .groupby(...).resample(on=...) results in an unexpected
# keyword warning.
df = pd.DataFrame({'key': ['A', 'B'] * 5,
'dates': pd.date_range('2016-01-01', periods=10),
'values': np.random.randn(10)})
expected = df.set_index('dates').groupby('key').resample('D').mean()
result = df.groupby('key').resample('D', on='dates').mean()
assert_frame_equal(result, expected)
def test_plot_api(self):
tm._skip_if_no_mpl()
# .resample(....).plot(...)
# hitting warnings
# GH 12448
s = Series(np.random.randn(60),
index=date_range('2016-01-01', periods=60, freq='1min'))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s.resample('15min').plot()
tm.assert_is_valid_plot_return_object(result)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s.resample('15min', how='sum').plot()
tm.assert_is_valid_plot_return_object(result)
def test_getitem(self):
r = self.frame.resample('H')
tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
r = self.frame.resample('H')['B']
assert r._selected_obj.name == self.frame.columns[1]
# technically this is allowed
r = self.frame.resample('H')['A', 'B']
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[0, 1]])
r = self.frame.resample('H')['A', 'B']
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[0, 1]])
def test_select_bad_cols(self):
g = self.frame.resample('H')
pytest.raises(KeyError, g.__getitem__, ['D'])
pytest.raises(KeyError, g.__getitem__, ['A', 'D'])
with tm.assert_raises_regex(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'D']]
def test_attribute_access(self):
r = self.frame.resample('H')
tm.assert_series_equal(r.A.sum(), r['A'].sum())
# getting
pytest.raises(AttributeError, lambda: r.F)
# setting
def f():
r.F = 'bah'
pytest.raises(ValueError, f)
def test_api_compat_before_use(self):
# make sure that we are setting the binner
# on these attributes
for attr in ['groups', 'ngroups', 'indices']:
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = pd.Series(np.arange(len(rng)), index=rng)
rs = ts.resample('30s')
# before use
getattr(rs, attr)
# after grouper is initialized is ok
rs.mean()
getattr(rs, attr)
def tests_skip_nuisance(self):
df = self.frame
df['D'] = 'foo'
r = df.resample('H')
result = r[['A', 'B']].sum()
expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)
assert_frame_equal(result, expected)
expected = r[['A', 'B', 'C']].sum()
result = r.sum()
assert_frame_equal(result, expected)
def test_downsample_but_actually_upsampling(self):
# this is reindex / asfreq
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng)
result = ts.resample('20s').asfreq()
expected = Series([0, 20, 40, 60, 80],
index=pd.date_range('2012-01-01 00:00:00',
freq='20s',
periods=5))
assert_series_equal(result, expected)
def test_combined_up_downsampling_of_irregular(self):
        # since we are really doing an operation like this
# ts2.resample('2s').mean().ffill()
# preserve these semantics
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = pd.Series(np.arange(len(rng)), index=rng)
ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = ts2.resample('2s', how='mean', fill_method='ffill')
expected = ts2.resample('2s').mean().ffill()
assert_series_equal(result, expected)
def test_transform(self):
r = self.series.resample('20min')
expected = self.series.groupby(
pd.Grouper(freq='20min')).transform('mean')
result = r.transform('mean')
assert_series_equal(result, expected)
def test_fillna(self):
# need to upsample here
rng = pd.date_range('1/1/2012', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng)
r = ts.resample('s')
expected = r.ffill()
result = r.fillna(method='ffill')
assert_series_equal(result, expected)
expected = r.bfill()
result = r.fillna(method='bfill')
assert_series_equal(result, expected)
with pytest.raises(ValueError):
r.fillna(0)
def test_apply_without_aggregation(self):
# both resample and groupby should work w/o aggregation
r = self.series.resample('20min')
g = self.series.groupby(pd.Grouper(freq='20min'))
for t in [g, r]:
result = t.apply(lambda x: x)
assert_series_equal(result, self.series)
def test_agg_consistency(self):
# make sure that we are consistent across
# similar aggregations with and w/o selection list
df = DataFrame(np.random.randn(1000, 3),
index=pd.date_range('1/1/2012', freq='S', periods=1000),
columns=['A', 'B', 'C'])
r = df.resample('3T')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
result = r.agg({'r1': 'mean', 'r2': 'sum'})
assert_frame_equal(result, expected)
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
def test_agg(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = pd.DataFrame(np.random.rand(10, 2),
columns=list('AB'),
index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'],
['mean', 'std']])
for t in cases:
result = t.aggregate([np.mean, np.std])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, b_std], axis=1)
for t in cases:
result = t.aggregate({'A': np.mean,
'B': np.std})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std']})
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
for t in cases:
result = t['A'].aggregate(['mean', 'sum'])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum'),
('B', 'mean2'),
('B', 'sum2')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
'B': {'mean2': 'mean', 'sum2': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'),
('r1', 'A', 'sum'),
('r2', 'B', 'mean'),
('r2', 'B', 'sum')])
def test_agg_misc(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = pd.DataFrame(np.random.rand(10, 2),
columns=list('AB'),
index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
# passed lambda
for t in cases:
result = t.agg({'A': np.sum,
'B': lambda x: np.std(x, ddof=1)})
rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))
expected = pd.concat([r['A'].sum(), rcustom], axis=1)
assert_frame_equal(result, expected, check_like=True)
# agg with renamers
expected = pd.concat([t['A'].sum(),
t['B'].sum(),
t['A'].mean(),
t['B'].mean()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),
('result1', 'B'),
('result2', 'A'),
('result2', 'B')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
('result2', np.mean)]))
assert_frame_equal(result, expected, check_like=True)
# agg with different hows
expected = pd.concat([t['A'].sum(),
t['A'].std(),
t['B'].mean(),
t['B'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.agg(OrderedDict([('A', ['sum', 'std']),
('B', ['mean', 'std'])]))
assert_frame_equal(result, expected, check_like=True)
# equivalent of using a selection list / or not
for t in cases:
result = t[['A', 'B']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# series like aggs
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std']})
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std')])
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([t['A'].agg(['sum', 'std']),
t['A'].agg(['mean', 'std'])],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# errors
# invalid names in the agg specification
for t in cases:
def f():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
t[['A']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
pytest.raises(SpecificationError, f)
def test_agg_nested_dicts(self):
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = pd.DataFrame(np.random.rand(10, 2),
columns=list('AB'),
index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
for t in cases:
def f():
t.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
pytest.raises(ValueError, f)
for t in cases:
expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),
t['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
def test_selection_api_validation(self):
# GH 13500
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
df = pd.DataFrame({'date': index,
'a': np.arange(len(index), dtype=np.int64)},
index=pd.MultiIndex.from_arrays([
np.arange(len(index), dtype=np.int64),
index], names=['v', 'd']))
df_exp = pd.DataFrame({'a': np.arange(len(index), dtype=np.int64)},
index=index)
# non DatetimeIndex
with pytest.raises(TypeError):
df.resample('2D', level='v')
with pytest.raises(ValueError):
df.resample('2D', on='date', level='d')
with pytest.raises(TypeError):
df.resample('2D', on=['a', 'date'])
with pytest.raises(KeyError):
df.resample('2D', level=['a', 'date'])
# upsampling not allowed
with pytest.raises(ValueError):
df.resample('2D', level='d').asfreq()
with pytest.raises(ValueError):
df.resample('2D', on='date').asfreq()
exp = df_exp.resample('2D').sum()
exp.index.name = 'date'
assert_frame_equal(exp, df.resample('2D', on='date').sum())
exp.index.name = 'd'
assert_frame_equal(exp, df.resample('2D', level='d').sum())
class Base(object):
"""
base class for resampling testing, calling
.create_series() generates a series of each index type
"""
def create_index(self, *args, **kwargs):
""" return the _index_factory created using the args, kwargs """
factory = self._index_factory()
return factory(*args, **kwargs)
@pytest.fixture
def _index_start(self):
return datetime(2005, 1, 1)
@pytest.fixture
def _index_end(self):
return datetime(2005, 1, 10)
@pytest.fixture
def _index_freq(self):
return 'D'
@pytest.fixture
def index(self, _index_start, _index_end, _index_freq):
return self.create_index(_index_start, _index_end, freq=_index_freq)
@pytest.fixture
def _series_name(self):
raise AbstractMethodError(self)
@pytest.fixture
def _static_values(self, index):
return np.arange(len(index))
@pytest.fixture
def series(self, index, _series_name, _static_values):
return Series(_static_values, index=index, name=_series_name)
@pytest.fixture
def frame(self, index, _static_values):
return DataFrame({'value': _static_values}, index=index)
@pytest.fixture(params=[Series, DataFrame])
def series_and_frame(self, request, index, _series_name, _static_values):
if request.param == Series:
return Series(_static_values, index=index, name=_series_name)
if request.param == DataFrame:
return DataFrame({'value': _static_values}, index=index)
@pytest.mark.parametrize('freq', ['2D', '1H'])
def test_asfreq(self, series_and_frame, freq):
obj = series_and_frame
result = obj.resample(freq).asfreq()
if freq == '2D':
new_index = obj.index.take(np.arange(0, len(obj.index), 2))
new_index.freq = to_offset('2D')
else:
new_index = self.create_index(obj.index[0], obj.index[-1],
freq=freq)
expected = obj.reindex(new_index)
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
s = self.create_series()
result = s.resample('1H').asfreq()
new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
expected = s.reindex(new_index)
assert_series_equal(result, expected)
frame = s.to_frame('value')
frame.iloc[1] = None
result = frame.resample('1H').asfreq(fill_value=4.0)
new_index = self.create_index(frame.index[0],
frame.index[-1], freq='1H')
expected = frame.reindex(new_index, fill_value=4.0)
assert_frame_equal(result, expected)
def test_resample_interpolate(self):
        # GH 12925
df = self.create_series().to_frame('value')
assert_frame_equal(
df.resample('1T').asfreq().interpolate(),
df.resample('1T').interpolate())
def test_raises_on_non_datetimelike_index(self):
# this is a non datetimelike index
xp = DataFrame()
pytest.raises(TypeError, lambda: xp.resample('A').mean())
def test_resample_empty_series(self):
# GH12771 & GH12868
s = self.create_series()[:0]
for freq in ['M', 'D', 'H']:
# need to test for ohlc from GH13083
methods = [method for method in resample_methods
if method != 'ohlc']
for method in methods:
result = getattr(s.resample(freq), method)()
expected = s.copy()
expected.index = s.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_series_equal(result, expected, check_dtype=False)
def test_resample_empty_dataframe(self):
# GH13212
index = self.create_series().index[:0]
f = DataFrame(index=index)
for freq in ['M', 'D', 'H']:
# count retains dimensions too
methods = downsample_methods + upsample_methods
for method in methods:
result = getattr(f.resample(freq), method)()
if method != 'size':
expected = f.copy()
else:
# GH14962
expected = Series([])
expected.index = f.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_almost_equal(result, expected, check_dtype=False)
# test size for GH13212 (currently stays as df)
def test_resample_empty_dtypes(self):
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
for index in tm.all_timeseries_index_generator(0):
for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
for how in downsample_methods + upsample_methods:
empty_series = pd.Series([], index, dtype)
try:
getattr(empty_series.resample('d'), how)()
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object)
pass
def test_resample_loffset_arg_type(self):
# GH 13218, 15002
df = self.create_series().to_frame('value')
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
                                           periods=len(df.index) // 2,
freq='2D')
# loffset coerces PeriodIndex to DateTimeIndex
if isinstance(expected_index, PeriodIndex):
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
for arg in ['mean', {'value': 'mean'}, ['mean']]:
result_agg = df.resample('2D', loffset='2H').agg(arg)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result_how = df.resample('2D', how=arg, loffset='2H')
if isinstance(arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value',
'mean')])
# GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
if isinstance(expected.index, TimedeltaIndex):
with pytest.raises(AssertionError):
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
else:
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
def test_apply_to_empty_series(self):
# GH 14313
series = self.create_series()[:0]
for freq in ['M', 'D', 'H']:
result = series.resample(freq).apply(lambda x: 1)
expected = series.resample(freq).apply(np.sum)
assert_series_equal(result, expected, check_dtype=False)
class TestDatetimeIndex(Base):
_index_factory = lambda x: date_range
@pytest.fixture
def _series_name(self):
return 'dti'
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def create_series(self):
i = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='dti')
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
        # GH 2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10),
index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right').mean()
exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
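        # with closed='right'/label='right' the first bin (23:55, 00:00] holds
        # only the very first observation, hence s[0] leads the expected values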
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=exp_idx)
assert_series_equal(result, expected)
assert result.index.name == 'index'
result = s.resample('5min', closed='left', label='right').mean()
exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
name='index')
expected = Series([s[:5].mean(), s[5:10].mean(),
s[10:].mean()], index=exp_idx)
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min').last()
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
args = downsample_methods
def _ohlc(group):
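            # emulate the 'ohlc' aggregation: open, high, low, close of the group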
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
inds = date_range('1/1/2000', periods=4, freq='5min', name='index')
for arg in args:
if arg == 'ohlc':
func = _ohlc
else:
func = arg
try:
result = getattr(s.resample(
'5min', closed='right', label='right'), arg)()
expected = s.groupby(grouplist).agg(func)
assert result.index.name == 'index'
if arg == 'ohlc':
expected = DataFrame(expected.values.tolist())
expected.columns = ['open', 'high', 'low', 'close']
expected.index = Index(inds, name='index')
assert_frame_equal(result, expected)
else:
expected.index = inds
assert_series_equal(result, expected)
except BaseException as exc:
exc.args += ('how=%s' % arg,)
raise
def test_numpy_compat(self):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range(
'20130101', periods=5, freq='s'))
r = s.resample('2s')
msg = "numpy operations are not valid with resample"
for func in ('min', 'max', 'sum', 'prod',
'mean', 'var', 'std'):
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, func),
func, 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, func), axis=1)
def test_resample_how_callables(self):
# GH 7929
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = pd.DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class fn_class:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(fn_class())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A': np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days', freq='30T', periods=50)
df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta(
np.arange(1480), unit='T'))
result = df.resample('30T').sum()
assert_frame_equal(result, expected)
s = df['A']
result = s.resample('30T').sum()
assert_series_equal(result, expected['A'])
def test_resample_single_period_timedelta(self):
s = Series(list(range(5)), index=pd.timedelta_range(
'1 day', freq='s', periods=5))
result = s.resample('2s').sum()
expected = Series([1, 5, 4], index=pd.timedelta_range(
'1 day', freq='2s', periods=3))
assert_series_equal(result, expected)
def test_resample_timedelta_idempotency(self):
# GH 12072
index = pd.timedelta_range('0', periods=9, freq='10L')
series = pd.Series(range(9), index=index)
result = series.resample('10L').mean()
expected = series
assert_series_equal(result, expected)
def test_resample_rounding(self):
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
from pandas.compat import StringIO
df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
'date', 'time']}, index_col='timestamp')
df.index.name = None
result = df.resample('6s').sum()
expected = DataFrame({'value': [
4, 9, 4, 2
]}, index=date_range('2014-11-08', freq='6s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('7s').sum()
expected = DataFrame({'value': [
4, 10, 4, 1
]}, index=date_range('2014-11-08', freq='7s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('11s').sum()
expected = DataFrame({'value': [
11, 8
]}, index=date_range('2014-11-08', freq='11s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('13s').sum()
expected = DataFrame({'value': [
13, 6
]}, index=date_range('2014-11-08', freq='13s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('17s').sum()
expected = DataFrame({'value': [
16, 3
]}, index=date_range('2014-11-08', freq='17s', periods=2))
assert_frame_equal(result, expected)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun').last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/9/2005']
assert result.iloc[2] == s.iloc[-1]
result = s.resample('W-MON').last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s['1/3/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-TUE').last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s['1/4/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-WED').last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s['1/5/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-THU').last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s['1/6/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-FRI').last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s['1/7/2005']
assert result.iloc[1] == s['1/10/2005']
# to biz day
result = s.resample('B').last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/3/2005']
assert result.iloc[5] == s['1/9/2005']
assert result.index.name == 'index'
def test_resample_upsampling_picked_but_not_correct(self):
# Test for issue #3020
dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D').mean()
assert result.index[0] == dates[0]
# GH 5955
        # incorrectly deciding to upsample when the axis frequency matches the
# resample frequency
import datetime
s = Series(np.arange(1., 6), index=[datetime.datetime(
1975, 1, i, 12, 0) for i in range(1, 6)])
expected = Series(np.arange(1., 6), index=date_range(
'19750101', periods=5, freq='D'))
result = s.resample('D').count()
assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample('D').sum()
result2 = s.resample('D').mean()
assert_series_equal(result1, expected)
assert_series_equal(result2, expected)
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A').mean()
assert_series_equal(result['A'], df['A'].resample('A').mean())
result = df.resample('M').mean()
assert_series_equal(result['A'], df['A'].resample('M').mean())
df.resample('M', kind='period').mean()
df.resample('W-WED', kind='period').mean()
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right',
loffset=timedelta(minutes=1)).mean()
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', closed='right', label='right',
loffset='1min').mean()
assert_series_equal(result, expected)
expected = s.resample(
'5min', closed='right', label='right',
loffset=Minute(1)).mean()
assert_series_equal(result, expected)
assert result.index.freq == Minute(5)
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun').last()
expected = ser.resample('w-sun', loffset=-bday).last()
assert result.index[0] - bday == expected.index[0]
def test_resample_loffset_count(self):
# GH 12725
start_time = '1/1/2000 00:00:00'
rng = date_range(start_time, periods=100, freq='S')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('10S', loffset='1s').count()
expected_index = (
date_range(start_time, periods=10, freq='10S') +
timedelta(seconds=1)
)
expected = pd.Series(10, index=expected_index)
assert_series_equal(result, expected)
# Same issue should apply to .size() since it goes through
# same code path
result = ts.resample('10S', loffset='1s').size()
assert_series_equal(result, expected)
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min').pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == 'index'
def test_resample_how_method(self):
# GH9915
s = pd.Series([11, 22],
index=[Timestamp('2015-03-31 21:48:52.672000'),
Timestamp('2015-03-31 21:49:52.739000')])
expected = pd.Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=[Timestamp('2015-03-31 21:48:50'),
Timestamp('2015-03-31 21:49:00'),
Timestamp('2015-03-31 21:49:10'),
Timestamp('2015-03-31 21:49:20'),
Timestamp('2015-03-31 21:49:30'),
Timestamp('2015-03-31 21:49:40'),
Timestamp('2015-03-31 21:49:50')])
assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point(self):
# GH 9756
index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
expected = DataFrame({'A': Series([21, 41, 63], index=index)})
index = DatetimeIndex(start='20150101', end='20150331', freq='B')
df = DataFrame(
{'A': Series(range(len(index)), index=index)}, dtype='int64')
result = df.resample('BM').last()
assert_frame_equal(result, expected)
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').ffill(limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_nearest_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').nearest(limit=2)
expected = ts.reindex(result.index, method='nearest', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min').ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs['open'] == s[-6]
assert xs['high'] == s[-6:-1].max()
assert xs['low'] == s[-6:-1].min()
assert xs['close'] == s[-2]
xs = result.iloc[0]
assert xs['open'] == s[0]
assert xs['high'] == s[:5].max()
assert xs['low'] == s[:5].min()
assert xs['close'] == s[4]
def test_resample_ohlc_result(self):
# GH 12332
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
s = Series(range(len(index)), index=index)
a = s.loc[:'4-15-2000'].resample('30T').ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:'4-14-2000'].resample('30T').ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range('2013-12-30', '2014-01-07')
index = rng.drop([Timestamp('2014-01-01'),
Timestamp('2013-12-31'),
Timestamp('2014-01-04'),
Timestamp('2014-01-05')])
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample('B').mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe(self):
df = (
pd.DataFrame({
'PRICE': {
Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {
Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H').ohlc()
exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
df['PRICE'].resample('H').ohlc()],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H').ohlc()
exp.columns = pd.MultiIndex.from_tuples([
('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq='M')
for i in range(12)])
df.iloc[3, :] = np.nan
result = df.resample('Q', axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [
Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right').mean()
result = bs.resample('8H').mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period').mean()
expected = ts.resample('A-DEC').mean()
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period').mean()
expected = ts.resample('A-JUN').mean()
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', closed='right',
label='right').ohlc()
assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc['1/1/2000 00:05'] == exp).all()
exp = _ohlc(ts['1/1/2000 5:55:01':])
assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M').mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
pytest.raises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
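        # resampling along axis=1 should match transposing, resampling the rows,
        # and transposing back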
result = df.resample('M', axis=1).mean()
expected = df.T.resample('M').mean().T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).mean()
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M').mean())
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2).mean()
expected = p_apply(panel2,
lambda x: x.resample('M', axis=1).mean())
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
expected = panel.resample('M', axis=1).mean()
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
expected = panel.resample('M', axis=2).mean()
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left').mean()
expected = ts.resample(freq, closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_single_group(self):
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D').apply(lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
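        # base=2 shifts the 5-minute bin edges by two minutes, so the first bin
        # containing 00:00:00 starts at 23:57:00 the previous day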
resampled = ts.resample('5min', base=2).mean()
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_base_with_timedeltaindex(self):
# GH 10530
rng = timedelta_range(start='0s', periods=25, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
with_base = ts.resample('2s', base=5).mean()
without_base = ts.resample('2s').mean()
exp_without_base = timedelta_range(start='0s', end='25s', freq='2s')
exp_with_base = timedelta_range(start='5s', end='29s', freq='2s')
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex(self):
# GH #12169
df = DataFrame({'Group_obj': 'A'},
index=pd.to_timedelta(list(range(20)), unit='s'))
df['Group'] = df['Group_obj'].astype('category')
result = df.resample('10s').agg(lambda x: (x.value_counts().index[0]))
expected = DataFrame({'Group_obj': ['A', 'A'],
'Group': ['A', 'A']},
index=pd.to_timedelta([0, 10], unit='s'))
expected = expected.reindex(['Group_obj', 'Group'], axis=1)
tm.assert_frame_equal(result, expected)
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left').mean()
expected = ts.resample('D', closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period').mean()
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg(self):
# aggregate a period resampler with a lambda
s2 = pd.Series(np.random.randint(0, 5, 50),
index=pd.period_range('2012-01-01',
freq='H',
periods=50),
dtype='float64')
expected = s2.to_timestamp().resample('D').mean().to_period()
result = s2.resample('D').agg(lambda x: x.mean())
assert_series_equal(result, expected)
def test_resample_segfault(self):
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]
df = pd.DataFrame.from_records(all_wins_and_wagers,
columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
assert_frame_equal(result, expected)
def test_resample_dtype_preservation(self):
# GH 12202
# validation tests for dtype preservation
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4, freq='W'),
'group': [1, 1, 2, 2],
'val': Series([5, 6, 7, 8],
dtype='int32')}
).set_index('date')
result = df.resample('1D').ffill()
assert result.val.dtype == np.int32
result = df.groupby('group').resample('1D').ffill()
assert result.val.dtype == np.int32
    def test_resample_dtype_coercion(self):
pytest.importorskip('scipy.interpolate')
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = pd.DataFrame(
df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = (df.astype("float64")
.resample("H")
.mean()
["a"]
.interpolate("cubic")
)
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W').mean()
expected = ts.resample('W-SUN').mean()
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('M')
def test_nanosecond_resample_error(self):
# GH 12307 - Values falls after last bin when
# Resampling using pd.tseries.offsets.Nano as period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(
start=pd.to_datetime(start),
periods=10,
freq='100n'
)
ts = pd.Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg('mean')
exp_indx = pd.date_range(
start=pd.to_datetime(exp_start),
periods=10,
freq='100n'
)
exp = pd.Series(range(len(exp_indx)), index=exp_indx)
assert_series_equal(result, exp)
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M').mean()
expected = df.resample(
'M', kind='period').mean().to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left').mean()
exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q').mean()
expected = df.resample(
'Q', kind='period').mean().to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left').mean()
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left').mean()
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M').mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday(self):
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
) | pd.date_range(
'2014-10-15 23:00:00', periods=2, freq='2200L')
s = pd.Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample('2200L').mean()
assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:02.000')
# Ensure right closing works
result = s.resample('2200L', label='right').mean()
assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:04.200')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left').mean()
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
tm.assert_index_equal(result.index, ex_index)
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC').mean()
assert len(result) == 0
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period').mean()
assert len(result) == 1
assert result.index[0] == Period('2000-04', freq='M')
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d').mean()
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min').aggregate(['mean', 'sum'])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D').sum()
exp = ts.sort_index().resample('D').sum()
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64', 'int32', 'float64', 'float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M').apply(lambda x: x.mean())
exp = ts.resample('M').mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample('M').mean()
foo_exp.name = 'foo'
bar_exp = ts.resample('M').std()
bar_exp.name = 'bar'
result = ts.resample('M').apply(
[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ['foo', 'bar']
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS').sum()
def test_resample_consistency(self):
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
s = pd.Series(np.arange(4.), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq='10T')
s10 = s.reindex(index=i10, method='bfill')
s10_2 = s.reindex(index=i10, method='bfill', limit=2)
rl = s.reindex_like(s10, method='bfill', limit=2)
r10_2 = s.resample('10Min').bfill(limit=2)
r10 = s.resample('10Min').bfill()
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
# GH 7227
dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
datetime(2014, 11, 5), datetime(2014, 9, 5),
datetime(2014, 10, 8), datetime(2014, 7, 15)]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
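        # the NaT entries in dates2/dates3 are dropped by the grouper, so all
        # three inputs should produce the same monthly counts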
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index('A').resample('M').count()
exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
'2014-09-30',
'2014-10-31', '2014-11-30'],
freq='M', name='A')
expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
len(dates))))
result = df.set_index('A').resample('M').count()
expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
index=exp_idx, columns=['B', 'C'])
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
def test_resample_nunique(self):
# GH 12352
df = DataFrame({
'ID': {pd.Timestamp('2015-06-05 00:00:00'): '0010100903',
pd.Timestamp('2015-06-08 00:00:00'): '0010150847'},
'DATE': {pd.Timestamp('2015-06-05 00:00:00'): '2015-06-05',
pd.Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
r = df.resample('D')
g = df.groupby(pd.Grouper(freq='D'))
expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x:
x.nunique())
assert expected.name == 'ID'
for t in [r, g]:
result = r.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample('D').nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
assert_series_equal(result, expected)
def test_resample_nunique_with_date_gap(self):
# GH 13453
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
index3 = index.append(index2)
s = pd.Series(range(len(index3)), index=index3, dtype='int64')
r = s.resample('M')
# Since all elements are unique, these should all be the same
results = [
r.count(),
r.nunique(),
r.agg(pd.Series.nunique),
r.agg('nunique')
]
assert_series_equal(results[0], results[1])
assert_series_equal(results[0], results[2])
assert_series_equal(results[0], results[3])
def test_resample_group_info(self): # GH10914
for n, k in product((10000, 100000), (10, 100, 1000)):
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
ts = Series(np.random.randint(0, n // k, n).astype('int64'),
index=np.random.choice(dr, n))
left = ts.resample('30T').nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(),
freq='30T')
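            # manually rebuild the expected per-bin nunique with searchsorted/lexsort/bincount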
vals = ts.values
bins = np.searchsorted(ix.values, ts.index, side='right')
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1,
minlength=len(ix)).astype('int64', copy=False)
right = Series(arr, index=ix)
assert_series_equal(left, right)
def test_resample_size(self):
n = 10000
dr = date_range('2015-09-19', periods=n, freq='T')
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample('7T').size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')
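        # manually compute the expected bin sizes with searchsorted/bincount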
bins = np.searchsorted(ix.values, ts.index.values, side='right')
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
copy=False)
right = Series(val, index=ix)
assert_series_equal(left, right)
def test_resample_across_dst(self):
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule='H').sum()
expected = DataFrame([5, 5], index=dti2)
assert_frame_equal(result, expected)
def test_resample_dst_anchor(self):
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
df = DataFrame([5], index=dti)
assert_frame_equal(df.resample(rule='D').sum(),
DataFrame([5], index=df.index.normalize()))
df.resample(rule='MS').sum()
assert_frame_equal(
df.resample(rule='MS').sum(),
DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
tz='US/Eastern')))
dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
tz='Europe/Paris')
values = range(dti.size)
df = DataFrame({"a": values,
"b": values,
"c": values}, index=dti, dtype='int64')
how = {"a": "min", "b": "max", "c": "count"}
assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193]},
index=date_range('9/30/2013', '11/4/2013',
freq='W-MON', tz='Europe/Paris')),
'W-MON Frequency')
assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193]},
index=date_range('9/30/2013', '11/11/2013',
freq='2W-MON', tz='Europe/Paris')),
'2W-MON Frequency')
assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 1538],
"b": [47, 1537, 1586],
"c": [48, 1490, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='MS', tz='Europe/Paris')),
'MS Frequency')
assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 1538],
"b": [1537, 1586],
"c": [1538, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='2MS', tz='Europe/Paris')),
'2MS Frequency')
df_daily = df['10/26/2013':'10/29/2013']
assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
[["a", "b", "c"]],
DataFrame({"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48]},
index=date_range('10/26/2013', '10/29/2013',
freq='D', tz='Europe/Paris')),
'D Frequency')
def test_resample_with_nat(self):
# GH 13020
index = DatetimeIndex([pd.NaT,
'1970-01-01 00:00:00',
pd.NaT,
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame_1s = DataFrame([3, 7, 11], index=index_1s)
assert_frame_equal(frame.resample('1s').mean(), frame_1s)
index_2s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:02'])
frame_2s = DataFrame([5, 11], index=index_2s)
assert_frame_equal(frame.resample('2s').mean(), frame_2s)
index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
frame_3s = DataFrame([7], index=index_3s)
assert_frame_equal(frame.resample('3s').mean(), frame_3s)
assert_frame_equal(frame.resample('60s').mean(), frame_3s)
def test_resample_timedelta_values(self):
# GH 13119
# check that timedelta dtype is preserved when NaT values are
# introduced by the resampling
times = timedelta_range('1 day', '4 day', freq='4D')
df = DataFrame({'time': times}, index=times)
times2 = timedelta_range('1 day', '4 day', freq='2D')
exp = Series(times2, index=times2, name='time')
exp.iloc[1] = pd.NaT
res = df.resample('2D').first()['time']
tm.assert_series_equal(res, exp)
res = df['time'].resample('2D').first()
tm.assert_series_equal(res, exp)
def test_resample_datetime_values(self):
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({'timestamp': dates}, index=dates)
exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range('2016-01-15', periods=3, freq='2D'),
name='timestamp')
res = df.resample('2D').first()['timestamp']
tm.assert_series_equal(res, exp)
res = df['timestamp'].resample('2D').first()
tm.assert_series_equal(res, exp)
class TestPeriodIndex(Base):
_index_factory = lambda x: period_range
@pytest.fixture
def _series_name(self):
return 'pi'
def create_series(self):
# TODO: replace calls to .create_series() by injecting the series
# fixture
i = period_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='pi')
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + 1).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
s = self.create_series()
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
def test_selection(self, index, freq, kind):
# This is a bug, these should be implemented
# GH 14008
df = pd.DataFrame({'date': index,
'a': np.arange(len(index), dtype=np.int64)},
index=pd.MultiIndex.from_arrays([
np.arange(len(index), dtype=np.int64),
index], names=['v', 'd']))
with pytest.raises(NotImplementedError):
df.resample(freq, on='date', kind=kind)
with pytest.raises(NotImplementedError):
df.resample(freq, level='d', kind=kind)
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
pytest.raises(ValueError, lambda: ts.resample('a-dec').mean())
pytest.raises(ValueError, lambda: ts.resample('q-mar').mean())
pytest.raises(ValueError, lambda: ts.resample('M').mean())
pytest.raises(ValueError, lambda: ts.resample('w-thu').mean())
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = pd.Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = pd.Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self):
# GH12770
series = pd.Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
for method in resample_methods:
result = getattr(series.resample('M'), method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
with pytest.raises(IncompatibleFrequency):
pd.Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - 1)
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9, dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M').ffill()
both = s.resample('M').ffill().resample('M').last().astype('int64')
assert_series_equal(last, both)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp').mean()
expected = ts.to_timestamp(how='end').resample('A-DEC').mean()
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month).ffill()
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how).ffill()
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A').ffill()
expected = stamps.resample('A').ffill().to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
pytest.raises(Exception, lambda: s.resample('A').ffill())
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
expected = ts.to_timestamp().resample(freq).mean()
if kind != 'timestamp':
expected = expected.to_period(freq)
result = ts.resample(freq, kind=kind).mean()
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D').asfreq()
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s').asfreq()
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min').apply(len)
expected = s.resample('10min').apply(len).loc[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU').asfreq()
assert result.isna().all()
result = ts.resample('W-THU').asfreq().ffill()[:-1]
expected = ts.asfreq('W-THU').ffill()
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W').mean()
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample(
'W').mean().tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D').mean()
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right').mean()
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period').mean()
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first'] = np.random.randn(len(rng))
ts['second'] = np.cumsum(np.random.randn(len(rng)))
expected = DataFrame(
{
'first': ts.resample('A').sum()['first'],
'second': ts.resample('A').mean()['second']},
columns=['first', 'second'])
result = ts.resample(
'A').agg({'first': np.sum,
'second': np.mean}).reindex(columns=['first', 'second'])
assert_frame_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', closed='left', label='right').mean()
exp = s[1:].resample('10min', closed='left', label='right').mean()
assert_series_equal(result, exp)
result = s.resample('10min', closed='left', label='left').mean()
exp = s[1:].resample('10min', closed='left', label='left').mean()
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
tm.assert_index_equal(result.index, ex_index)
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A').mean()
exp = ts.to_timestamp().resample('A').mean().to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', closed='left', label='left').first()
def test_resample_with_dst_time_change(self):
# GH 15549
index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000],
tz='UTC').tz_convert('America/Chicago')
df = pd.DataFrame([1, 2], index=index)
result = df.resample('12h', closed='right',
label='right').last().ffill()
expected_index_values = ['2016-03-09 12:00:00-06:00',
'2016-03-10 00:00:00-06:00',
'2016-03-10 12:00:00-06:00',
'2016-03-11 00:00:00-06:00',
'2016-03-11 12:00:00-06:00',
'2016-03-12 00:00:00-06:00',
'2016-03-12 12:00:00-06:00',
'2016-03-13 00:00:00-06:00',
'2016-03-13 13:00:00-05:00',
'2016-03-14 01:00:00-05:00',
'2016-03-14 13:00:00-05:00',
'2016-03-15 01:00:00-05:00',
'2016-03-15 13:00:00-05:00']
index = pd.DatetimeIndex(expected_index_values,
tz='UTC').tz_convert('America/Chicago')
expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 2.0], index=index)
assert_frame_equal(result, expected)
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101', '20000201'))
res1 = foo.resample("BMS").mean()
res2 = foo.resample("BMS").mean().resample("B").mean()
assert res1.index[0] == Timestamp('20000103')
assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right').mean())
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left').mean())
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A").mean()
tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
# 4076
        # when the frequency is evenly divisible, no extra bins should be created
df = DataFrame(np.random.randn(9, 3),
index=date_range('2000-1-1', periods=9))
result = df.resample('5D').mean()
expected = pd.concat(
[df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
assert_frame_equal(result, expected)
index = date_range(start='2001-5-4', periods=28)
df = DataFrame(
[{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
[{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
index=index.append(index)).sort_index()
index = date_range('2001-5-4', periods=4, freq='7D')
expected = DataFrame(
[{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
index=index)
result = df.resample('7D').count()
assert_frame_equal(result, expected)
expected = DataFrame(
[{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
index=index)
result = df.resample('7D').sum()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
@pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']])
def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
# make sure passing loffset returns DatetimeIndex in all cases
# basic method taken from Base.test_resample_loffset_arg_type()
df = frame
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
periods=len(df.index) / 2,
freq='2D')
# loffset coerces PeriodIndex to DateTimeIndex
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result_how = df.resample('2D', how=agg_arg, loffset='2H',
kind=kind)
if isinstance(agg_arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')])
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
@pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)])
@pytest.mark.parametrize('kind', [None, 'period'])
def test_upsampling_ohlc(self, freq, period_mult, kind):
# GH 13083
pi = PeriodIndex(start='2000', freq='D', periods=10)
s = Series(range(len(pi)), index=pi)
expected = s.to_timestamp().resample(freq).ohlc().to_period(freq)
# timestamp-based resampling doesn't include all sub-periods
# of the last original period, so extend accordingly:
new_index = PeriodIndex(start='2000', freq=freq,
periods=period_mult * len(pi))
expected = expected.reindex(new_index)
result = s.resample(freq, kind=kind).ohlc()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('periods, values',
[([pd.NaT, '1970-01-01 00:00:00', pd.NaT,
'1970-01-01 00:00:02', '1970-01-01 00:00:03'],
[2, 3, 5, 7, 11]),
([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT,
pd.NaT, pd.NaT, '1970-01-01 00:00:02',
'1970-01-01 00:00:03', pd.NaT, pd.NaT],
[1, 2, 3, 5, 6, 8, 7, 11, 12, 13])])
@pytest.mark.parametrize('freq, expected_values',
[('1s', [3, np.NaN, 7, 11]),
('2s', [3, int((7 + 11) / 2)]),
('3s', [int((3 + 7) / 2), 11])])
def test_resample_with_nat(self, periods, values, freq, expected_values):
# GH 13224
index = PeriodIndex(periods, freq='S')
frame = DataFrame(values, index=index)
expected_index = period_range('1970-01-01 00:00:00',
periods=len(expected_values), freq=freq)
expected = DataFrame(expected_values, index=expected_index)
result = frame.resample(freq).mean()
assert_frame_equal(result, expected)
def test_resample_with_only_nat(self):
# GH 13224
pi = PeriodIndex([pd.NaT] * 3, freq='S')
frame = DataFrame([2, 3, 5], index=pi)
expected_index = PeriodIndex(data=[], freq=pi.freq)
expected = DataFrame([], index=expected_index)
result = frame.resample('1s').mean()
assert_frame_equal(result, expected)
class TestTimedeltaIndex(Base):
_index_factory = lambda x: timedelta_range
@pytest.fixture
def _index_start(self):
return '1 day'
@pytest.fixture
def _index_end(self):
return '10 day'
@pytest.fixture
def _series_name(self):
return 'tdi'
def create_series(self):
i = timedelta_range('1 day',
'10 day', freq='D')
return Series(np.arange(len(i)), index=i, name='tdi')
def test_asfreq_bug(self):
import datetime as dt
df = DataFrame(data=[1, 3],
index=[dt.timedelta(), dt.timedelta(minutes=3)])
result = df.resample('1T').asfreq()
expected = DataFrame(data=[1, np.nan, np.nan, 3],
index=timedelta_range('0 day',
periods=4,
freq='1T'))
assert_frame_equal(result, expected)
class TestResamplerGrouper(object):
def setup_method(self, method):
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)},
index=date_range('1/1/2000',
freq='s',
periods=40))
def test_back_compat_v180(self):
df = self.frame
for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = df.groupby('A').resample('4s', how=how)
expected = getattr(df.groupby('A').resample('4s'), how)()
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = df.groupby('A').resample('4s', how='mean',
fill_method='ffill')
expected = df.groupby('A').resample('4s').mean().ffill()
assert_frame_equal(result, expected)
def test_tab_complete_ipython6_warning(self, ip):
from IPython.core.completer import provisionalcompleter
code = dedent("""\
import pandas.util.testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
""")
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('rs.', 1))
def test_deferred_with_groupby(self):
# GH 12486
# support deferred resample ops with groupby
data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3],
['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7],
['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5],
['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1],
['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]]
df = DataFrame(data, columns=['date', 'id', 'score'])
df.date = pd.to_datetime(df.date)
f = lambda x: x.set_index('date').resample('D').asfreq()
expected = df.groupby('id').apply(f)
result = df.set_index('date').groupby('id').resample('D').asfreq()
assert_frame_equal(result, expected)
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4,
freq='W'),
'group': [1, 1, 2, 2],
'val': [5, 6, 7, 8]}).set_index('date')
f = lambda x: x.resample('1D').ffill()
expected = df.groupby('group').apply(f)
result = df.groupby('group').resample('1D').ffill()
assert_frame_equal(result, expected)
def test_getitem(self):
g = self.frame.groupby('A')
expected = g.B.apply(lambda x: x.resample('2s').mean())
result = g.resample('2s').B.mean()
assert_series_equal(result, expected)
result = g.B.resample('2s').mean()
assert_series_equal(result, expected)
result = g.resample('2s').mean().B
assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}]
df = pd.DataFrame(data, index=pd.date_range('2016-01-01', periods=2))
r = df.groupby('id').resample('1D')
result = r['buyer'].count()
expected = pd.Series([1, 1],
index=pd.MultiIndex.from_tuples(
[(1, pd.Timestamp('2016-01-01')),
(2, pd.Timestamp('2016-01-02'))],
names=['id', None]),
name='buyer')
assert_series_equal(result, expected)
result = r['buyer'].count()
assert_series_equal(result, expected)
def test_nearest(self):
# GH 17496
# Resample nearest
index = pd.date_range('1/1/2000', periods=3, freq='T')
result = pd.Series(range(3), index=index).resample('20s').nearest()
expected = pd.Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
['2000-01-01 00:00:00', '2000-01-01 00:00:20',
'2000-01-01 00:00:40', '2000-01-01 00:01:00',
'2000-01-01 00:01:20', '2000-01-01 00:01:40',
'2000-01-01 00:02:00'],
dtype='datetime64[ns]',
freq='20S'))
assert_series_equal(result, expected)
def test_methods(self):
g = self.frame.groupby('A')
r = g.resample('2s')
for f in ['first', 'last', 'median', 'sem', 'sum', 'mean',
'min', 'max']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
for f in ['size']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['count']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
# series only
for f in ['nunique']:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['nearest', 'backfill', 'ffill', 'asfreq']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample('2s').ohlc())
assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1))
assert_frame_equal(result, expected)
def test_apply(self):
g = self.frame.groupby('A')
r = g.resample('2s')
# reduction
expected = g.resample('2s').sum()
def f(x):
return x.resample('2s').sum()
result = r.apply(f)
assert_frame_equal(result, expected)
def f(x):
return x.resample('2s').apply(lambda y: y.sum())
result = g.apply(f)
assert_frame_equal(result, expected)
def test_apply_with_mutated_index(self):
# GH 15169
index = pd.date_range('1-1-2015', '12-31-15', freq='D')
df = pd.DataFrame(data={'col1': np.random.rand(len(index))},
index=index)
def f(x):
s = pd.Series([1, 2], index=['a', 'b'])
return s
expected = df.groupby(pd.Grouper(freq='M')).apply(f)
result = df.resample('M').apply(f)
assert_frame_equal(result, expected)
# A case for series
expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f)
result = df['col1'].resample('M').apply(f)
assert_series_equal(result, expected)
def test_resample_groupby_with_label(self):
# GH 13235
index = date_range('2000-01-01', freq='2D', periods=5)
df = DataFrame(index=index,
data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]}
)
result = df.groupby('col0').resample('1W', label='left').sum()
mi = [np.array([0, 0, 1, 2]),
pd.to_datetime(np.array(['1999-12-26', '2000-01-02',
'2000-01-02', '2000-01-02'])
)
]
mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None])
expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]},
index=mindex
)
assert_frame_equal(result, expected)
def test_consistency_with_window(self):
# consistent return values with window
df = self.frame
expected = pd.Int64Index([1, 2, 3], name='A')
result = df.groupby('A').resample('2s').mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby('A').rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns(self):
# GH 14233
df = pd.DataFrame(np.random.randn(20, 3),
columns=list('aaa'),
index=pd.date_range('2012-01-01',
periods=20, freq='s'))
df2 = df.copy()
df2.columns = ['a', 'b', 'c']
expected = df2.resample('5s').median()
result = df.resample('5s').median()
expected.columns = result.columns
assert_frame_equal(result, expected)
class TestTimeGrouper(object):
def setup_method(self, method):
self.ts = Series(np.random.randn(1000),
index=date_range('1/1/2000', periods=1000))
def test_apply(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
grouped = self.ts.groupby(grouper)
f = lambda x: x.sort_values()[-3:]
applied = grouped.apply(f)
expected = self.ts.groupby(lambda x: x.year).apply(f)
applied.index = applied.index.droplevel(0)
expected.index = expected.index.droplevel(0)
assert_series_equal(applied, expected)
def test_count(self):
self.ts[::3] = np.nan
expected = self.ts.groupby(lambda x: x.year).count()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
result = self.ts.groupby(grouper).count()
expected.index = result.index
assert_series_equal(result, expected)
result = self.ts.resample('A').count()
expected.index = result.index
assert_series_equal(result, expected)
def test_numpy_reduction(self):
result = self.ts.resample('A', closed='right').prod()
expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
expected.index = result.index
assert_series_equal(result, expected)
def test_apply_iteration(self):
# #2300
N = 1000
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({'open': 1, 'close': 2}, index=ind)
tg = TimeGrouper('M')
_, grouper, _ = tg._get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
f = lambda df: df['close'] / df['open']
# it works!
result = grouped.apply(f)
tm.assert_index_equal(result.index, df.index)
def test_panel_aggregation(self):
ind = pd.date_range('1/1/2000', periods=100)
data = np.random.randn(2, len(ind), 4)
with catch_warnings(record=True):
wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
minor_axis=['A', 'B', 'C', 'D'])
tg = TimeGrouper('M', axis=1)
_, grouper, _ = tg._get_grouper(wp)
bingrouped = wp.groupby(grouper)
binagg = bingrouped.mean()
def f(x):
assert (isinstance(x, Panel))
return x.mean(1)
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
def test_fails_on_no_datetime_index(self):
index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex')
index_funcs = (tm.makeIntIndex,
tm.makeUnicodeIndex, tm.makeFloatIndex,
lambda m: tm.makeCustomIndex(m, 2))
n = 2
for name, func in zip(index_names, index_funcs):
index = func(n)
df = DataFrame({'a': np.random.randn(n)}, index=index)
with tm.assert_raises_regex(TypeError,
"Only valid with "
"DatetimeIndex, TimedeltaIndex "
"or PeriodIndex, but got an "
"instance of %r" % name):
df.groupby(TimeGrouper('D'))
def test_aaa_group_order(self):
# GH 12840
# check TimeGrouper perform stable sorts
n = 20
data = np.random.randn(n, 4)
df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
datetime(2013, 1, 3), datetime(2013, 1, 4),
datetime(2013, 1, 5)] * 4
grouped = df.groupby(TimeGrouper(key='key', freq='D'))
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
df[::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)),
df[1::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)),
df[2::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)),
df[3::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)),
df[4::5])
def test_aggregate_normal(self):
# check TimeGrouper's aggregation is identical as normal groupby
n = 20
data = np.random.randn(n, 4)
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, 3, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
datetime(2013, 1, 3), datetime(2013, 1, 4),
datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
expected = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
assert_frame_equal(expected, dt_result)
for func in ['count', 'sum']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
# GH 7453
for func in ['size']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
# GH 7453
for func in ['first', 'last']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
        # if TimeGrouper is used, 'nth' doesn't work yet
"""
for func in ['nth']:
expected = getattr(normal_grouped, func)(3)
expected.index = date_range(start='2013-01-01',
freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)(3)
assert_frame_equal(expected, dt_result)
"""
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
    def test_resample_entirely_nat_window(self, method, unit):
s = pd.Series([0] * 2 + [np.nan] * 2,
index=pd.date_range('2017', periods=4))
# 0 / 1 by default
result = methodcaller(method)(s.resample("2d"))
expected = pd.Series([0.0, unit],
index=pd.to_datetime(['2017-01-01',
'2017-01-03']))
tm.assert_series_equal(result, expected)
# min_count=0
result = methodcaller(method, min_count=0)(s.resample("2d"))
expected = pd.Series([0.0, unit],
index=pd.to_datetime(['2017-01-01',
'2017-01-03']))
tm.assert_series_equal(result, expected)
# min_count=1
result = methodcaller(method, min_count=1)(s.resample("2d"))
expected = pd.Series([0.0, np.nan],
index=pd.to_datetime(['2017-01-01',
'2017-01-03']))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, fill_value', [
('min', np.nan),
('max', np.nan),
('sum', 0),
('prod', 1),
('count', 0),
])
def test_aggregate_with_nat(self, func, fill_value):
# check TimeGrouper's aggregation is identical as normal groupby
# if NaT is included, 'var', 'std', 'mean', 'first','last'
# and 'nth' doesn't work yet
n = 20
data = np.random.randn(n, 4).astype('int64')
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[fill_value] * 4], index=[3],
columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
assert_frame_equal(expected, dt_result)
assert dt_result.index.name == 'key'
def test_aggregate_with_nat_size(self):
# GH 9925
n = 20
data = np.random.randn(n, 4).astype('int64')
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
normal_result = normal_grouped.size()
dt_result = dt_grouped.size()
pad = Series([0], index=[3])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
assert_series_equal(expected, dt_result)
assert dt_result.index.name == 'key'
def test_repr(self):
# GH18203
result = repr(TimeGrouper(key='A', freq='H'))
expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
"convention='e', base=0)")
assert result == expected
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_upsample_sum(self, method, unit):
s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H"))
resampled = s.resample("30T")
index = pd.to_datetime(['2017-01-01T00:00:00',
'2017-01-01T00:30:00',
'2017-01-01T01:00:00'])
# 0 / 1 by default
result = methodcaller(method)(resampled)
expected = pd.Series([1, unit, 1], index=index)
tm.assert_series_equal(result, expected)
# min_count=0
result = methodcaller(method, min_count=0)(resampled)
expected = pd.Series([1, unit, 1], index=index)
tm.assert_series_equal(result, expected)
# min_count=1
result = methodcaller(method, min_count=1)(resampled)
expected = pd.Series([1, np.nan, 1], index=index)
tm.assert_series_equal(result, expected)
# min_count>1
result = methodcaller(method, min_count=2)(resampled)
expected = pd.Series([np.nan, np.nan, np.nan], index=index)
tm.assert_series_equal(result, expected)
|
the-stack_106_29235 | import os
import torch
import pickle
from MeLU import MeLU
from options import config
def selection(melu, master_path, topk):
if not os.path.exists("{}/scores/".format(master_path)):
os.mkdir("{}/scores/".format(master_path))
if config['use_cuda']:
melu.cuda()
melu.eval()
target_state = 'warm_state'
dataset_size = int(len(os.listdir("{}/{}".format(master_path, target_state))) / 4)
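    # each task index presumably has 4 pickle files (support/query x and y), hence the division by 4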
grad_norms = {}
for j in list(range(dataset_size)):
support_xs = pickle.load(open("{}/{}/supp_x_{}.pkl".format(master_path, target_state, j), "rb"))
support_ys = pickle.load(open("{}/{}/supp_y_{}.pkl".format(master_path, target_state, j), "rb"))
item_ids = []
with open("{}/log/{}/supp_x_{}_u_m_ids.txt".format(master_path, target_state, j), "r") as f:
for line in f.readlines():
item_id = line.strip().split()[1]
item_ids.append(item_id)
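        # accumulate each item's support-set gradient norm (discriminative value) and appearance count (popularity value)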
for support_x, support_y, item_id in zip(support_xs, support_ys, item_ids):
support_x = support_x.view(1, -1)
support_y = support_y.view(1, -1)
norm = melu.get_weight_avg_norm(support_x, support_y, config['inner'])
try:
grad_norms[item_id]['discriminative_value'] += norm.item()
grad_norms[item_id]['popularity_value'] += 1
except:
grad_norms[item_id] = {
'discriminative_value': norm.item(),
'popularity_value': 1
}
d_value_max = 0
p_value_max = 0
for item_id in grad_norms.keys():
grad_norms[item_id]['discriminative_value'] /= grad_norms[item_id]['popularity_value']
if grad_norms[item_id]['discriminative_value'] > d_value_max:
d_value_max = grad_norms[item_id]['discriminative_value']
if grad_norms[item_id]['popularity_value'] > p_value_max:
p_value_max = grad_norms[item_id]['popularity_value']
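    # normalize both values to [0, 1] and combine them into the final evidence-candidate score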
for item_id in grad_norms.keys():
grad_norms[item_id]['discriminative_value'] /= float(d_value_max)
grad_norms[item_id]['popularity_value'] /= float(p_value_max)
grad_norms[item_id]['final_score'] = grad_norms[item_id]['discriminative_value'] * grad_norms[item_id]['popularity_value']
movie_info = {}
with open("./movielens/ml-1m/movies_extrainfos.dat", encoding="utf-8") as f:
for line in f.readlines():
tmp = line.strip().split("::")
movie_info[tmp[0]] = "{} ({})".format(tmp[1], tmp[2])
evidence_candidates = []
for item_id, value in list(sorted(grad_norms.items(), key=lambda x: x[1]['final_score'], reverse=True))[:topk]:
evidence_candidates.append((movie_info[item_id], value['final_score']))
return evidence_candidates
|
the-stack_106_29237 | import numpy as np
from bokeh import plotting
from bokeh.embed import components
from bokeh.io import curdoc
from bokeh.models import FuncTickFormatter, OpenURL, TapTool
from bokeh.models import Label, Legend, LegendItem, LogAxis, Range1d
from bokeh.themes import Theme
from utils import get_update_time, load_data, log_axis_labels
# get the exoplot theme
theme = Theme(filename="./exoplots_theme.yaml")
curdoc().theme = theme
# what order to plot things and what the legend labels will say
methods = ['Transit', 'Radial Velocity',]
# markers and colors in the same order as the missions above
markers = ['circle', 'square', 'triangle', 'diamond', 'inverted_triangle']
# colorblind friendly palette from https://personal.sron.nl/~pault/
# other ideas:
# https://thenode.biologists.com/data-visualization-with-flying-colors/research/
colors = ['#228833', '#ee6677', '#ccbb44', '#aa3377', '#4477aa',
'#aaaaaa', '#66ccee']
# output files
embedfile = '_includes/radius_tsm_embed.html'
fullfile = '_includes/radius_tsm.html'
# set up the full output file
plotting.output_file(fullfile, title='Radius TSM Plot')
# load the data
dfcon, dfkoi, dfk2, dftoi = load_data()
# what to display when hovering over a data point
TOOLTIPS = [
("Planet", "@planet"),
# only give the decimal and sig figs if needed
("Period", "@period{0,0[.][0000]} days"),
("Radius", "@radius_e{0,0[.][00]} Earth; @jupradius{0,0[.][0000]} Jup"),
("Mass", "@mass{0,0[.][00]} Earth; @jupmass{0,0[.][0000]} Jup"),
("TSM","@TSM{0,0[.][000]}"),
("Discovered via", "@method")
]
# create the figure
fig = plotting.figure(x_axis_type='log', y_axis_type='log', tooltips=TOOLTIPS)
# allow for something to happen when you click on data points
fig.add_tools(TapTool())
# track the global min and max planet mass values across the plotted methods
ymin = 1
ymax = 1
# save the output plots to rearrange them in the legend
glyphs = []
counts = []
for ii, imeth in enumerate(methods):
# select the appropriate set of planets for each mission
if imeth == 'Other':
good = ((~np.in1d(dfcon['pl_discmethod'], methods)) &
(~dfcon['pl_discmethod'].str.contains('Timing')) &
np.isfinite(dfcon['pl_bmasse']) &
np.isfinite(dfcon['pl_rade']) &
np.isfinite(dfcon['pl_eqt']) &
np.isfinite(dfcon['st_rad']) &
np.isfinite(dfcon['st_j']) &
np.isfinite(dfcon['pl_orbper']))
elif imeth == 'Timing Variations':
good = (dfcon['pl_discmethod'].str.contains('Timing') &
np.isfinite(dfcon['pl_bmasse']) &
np.isfinite(dfcon['pl_rade']) &
np.isfinite(dfcon['pl_eqt']) &
np.isfinite(dfcon['st_rad']) &
np.isfinite(dfcon['st_j']) &
np.isfinite(dfcon['pl_orbper']))
else:
good = ((dfcon['pl_discmethod'] == imeth) &
np.isfinite(dfcon['pl_bmasse']) &
np.isfinite(dfcon['pl_rade']) &
np.isfinite(dfcon['pl_eqt']) &
np.isfinite(dfcon['st_rad']) &
np.isfinite(dfcon['st_j']) &
np.isfinite(dfcon['pl_orbper']))
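    # TSM scale factors binned by planet radius (these match the Kempton et al. 2018 values)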
scale_factor = np.zeros((dfcon['pl_rade'][good]).size) + 1.15 # base case
scale_factor[dfcon['pl_rade'][good] < 1.5] = 0.190
scale_factor[(1.5 <= dfcon['pl_rade'][good]) & (dfcon['pl_rade'][good] < 2.75)] = 1.26
scale_factor[(2.75 <= dfcon['pl_rade'][good]) & (dfcon['pl_rade'][good] < 4)] = 1.28
#if dfcon['pl_rade'] < 1.5:
# Scale_Factor = .190
#elif 1.5<dfcon['pl_rade']<2.75:
# Scale_Factor = 1.26
#elif 2.75<dfcon['pl_rade']<4:
# Scale_Factor = 1.28
#else:
# Scale_Factor = 1.15
# make the alpha of large groups lower so they don't dominate so much
alpha = 1. - good.sum()/1000.
alpha = max(0.2, alpha)
# what the hover tooltip draws its values from
source = plotting.ColumnDataSource(data=dict(
planet=dfcon['pl_name'][good],
period=dfcon['pl_orbper'][good],
host=dfcon['pl_hostname'][good],
mass=dfcon['pl_bmasse'][good],
method=dfcon['pl_discmethod'][good],
jupmass=dfcon['pl_bmassj'][good],
jupradius=dfcon['pl_radj'][good],
radius_e=dfcon['pl_rade'][good],
equil_temp=dfcon['pl_eqt'][good],
radius_sun=dfcon['st_rad'][good],
mag_star=dfcon['st_j'][good],
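        # transmission spectroscopy metric: scale * Rp^3 * Teq / (Mp * Rs^2) * 10^(-mJ / 5)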
        TSM=(scale_factor
             * ((dfcon['pl_rade'][good]**3 * dfcon['pl_eqt'][good])
                / (dfcon['pl_bmasse'][good] * dfcon['st_rad'][good]**2))
             * 10**(-1 * dfcon['st_j'][good] / 5)),
url=dfcon['url'][good]
))
print(imeth, ': ', good.sum())
counts.append(f'{good.sum():,}')
# plot the planets
# nonselection stuff is needed to prevent planets in that category from
# disappearing when you click on a data point ("select" it)
glyph = fig.scatter('radius_e', 'TSM', color=colors[ii], source=source,
size=8, alpha=alpha, marker=markers[ii],
nonselection_alpha=alpha,
nonselection_color=colors[ii])
glyphs.append(glyph)
# save the global min/max
ymin = min(ymin, source.data['mass'].min())
ymax = max(ymax, source.data['mass'].max())
# set up where to send people when they click on a planet
url = "@url"
taptool = fig.select(TapTool)
taptool.callback = OpenURL(url=url)
# figure out what the default axis limits are
ydiff = np.log10(ymax) - np.log10(ymin)
ystart = 10.**(np.log10(ymin) - 0.05*ydiff)
yend = 10.**(np.log10(ymax) + 0.05*ydiff)
# jupiter/earth mass ratio
massratio = 317.8
# add the first y-axis's label and use our custom log formatting for both axes
fig.yaxis.axis_label = 'TSM'
fig.yaxis.formatter = FuncTickFormatter(code=log_axis_labels())
# add the x-axis's label and use our custom log formatting
fig.xaxis.axis_label = 'Radius (Earth Radii)'
fig.xaxis.formatter = FuncTickFormatter(code=log_axis_labels())
# set up all the legend objects
items = [LegendItem(label=ii + f' ({counts[methods.index(ii)]})',
renderers=[jj])
for ii, jj in zip(methods, glyphs)]
# create the legend
legend = Legend(items=items, location="center")
legend.title = 'Discovered via'
legend.spacing = 10
legend.margin = 8
fig.add_layout(legend, 'above')
# overall figure title
fig.title.text = 'Confirmed Planets'
# create the three lines of credit text in the two bottom corners
label_opts1 = dict(
x=-84, y=42,
x_units='screen', y_units='screen'
)
label_opts2 = dict(
x=-84, y=47,
x_units='screen', y_units='screen'
)
label_opts3 = dict(
x=612, y=64,
x_units='screen', y_units='screen', text_align='right',
text_font_size='9pt'
)
msg1 = 'By Exoplots'
# when did the data last get updated
modtimestr = get_update_time().strftime('%Y %b %d')
msg3 = 'Data: NASA Exoplanet Archive'
caption1 = Label(text=msg1, **label_opts1)
caption2 = Label(text=modtimestr, **label_opts2)
caption3 = Label(text=msg3, **label_opts3)
fig.add_layout(caption1, 'below')
fig.add_layout(caption2, 'below')
fig.add_layout(caption3, 'below')
plotting.save(fig)
# save the individual pieces so we can just embed the figure without the whole
# html page
script, div = components(fig, theme=theme)
with open(embedfile, 'w') as ff:
ff.write(script)
ff.write(div)
|
the-stack_106_29238 | from typing import Set
import logging
from pathlib import Path
import itertools as it
import re
from zensols.config import YamlConfig
logger = logging.getLogger(__name__)
class AppConfig(YamlConfig):
"""Application specific configuration access and parsing.
Since much of the application centers around configuration of what to
    persist, this class does more heavy lifting than most configuration-like
classes.
"""
ROOT = 'discover'
OBJECTS_PATH = f'{ROOT}.objects'
PROFILES_PATH = f'{ROOT}.profiles'
EMPTY_DIR_PATH = f'{ROOT}.empty_dirs'
OBJECTS_PROFILE_PATH = f'{PROFILES_PATH}.{{}}.objects'
EMPTY_DIR_PROFILE_PATH = f'{PROFILES_PATH}.{{}}.empty_dirs'
def __init__(self, config_file=None, default_vars=None):
super(AppConfig, self).__init__(
config_file, delimiter='^', default_vars=default_vars)
@property
def _find_profiles(self):
opts = self.get_options(self.PROFILES_PATH, expect=False)
if opts is None:
opts = ()
return opts
@property
def all_profiles(self) -> Set[str]:
return set(self._find_profiles.keys())
@staticmethod
def _split_profiles(profile_str):
return re.split(r'\s*,\s*', profile_str)
@property
def _default_profiles(self):
strlist = self.get_option(
f'{self.ROOT}.default_profiles', expect=False)
if strlist is not None:
return self._split_profiles(strlist)
def get_profiles(self, profile_overide_str):
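        # resolve the active profiles: explicit override, else configured defaults, else all discovered profiles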
if profile_overide_str is None:
profiles = self._default_profiles
else:
profiles = self._split_profiles(profile_overide_str)
if profiles is None:
profiles = self._find_profiles
profiles = list(profiles)
# protect user error
if 'default' not in profiles:
profiles = ['default'] + list(profiles)
if 'nodefault' in profiles:
profiles.pop(profiles.index('default'))
profiles.pop(profiles.index('nodefault'))
return profiles
def _iterate_objects(self, profile):
if profile == 'default':
path = self.OBJECTS_PATH
else:
path = self.OBJECTS_PROFILE_PATH.format(profile)
opts = self.get_options(path, expect=False)
if opts is None and profile == 'default':
opts = ()
if opts is None:
logger.warning(
f'no such profile for objects: {profile} for path {path}' +
'--maybe entries exist in other profiles')
opts = ()
return map(lambda x: x.strip(), opts)
def get_discoverable_objects(self, profiles):
return it.chain(*map(self._iterate_objects, profiles))
def get_empty_dirs(self, profiles):
paths = []
for profile in profiles:
if profile == 'default':
path = self.EMPTY_DIR_PATH
else:
path = self.EMPTY_DIR_PROFILE_PATH.format(profile)
opts = self.get_options(path)
if opts is None:
                ## a warning for missing empty directory entries is not worth emitting
# logger.warning(
# f'no such profile for objects: {profile} for path {path}' +
# '--maybe entries exist in other profiles')
pass
else:
paths.extend(opts)
return map(lambda x: Path(x).expanduser().absolute(), paths)
def _get_path(self, name):
return Path(self.get_option(name, expect=True)).expanduser().absolute()
@property
def dist_dir(self):
return self._get_path(f'{self.ROOT}.local.dist_dir')
@dist_dir.setter
def dist_dir(self, dist_dir):
if self.default_vars is None:
self.default_vars = {}
self.default_vars[f'{self.ROOT}.local.dist_dir'] = dist_dir
@property
def wheel_dir_name(self):
return self._get_path(f'{self.ROOT}.local.wheels_dir')
@property
def bootstrap_script_file(self):
return Path(self.dist_dir, 'bootstrap.sh')
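# A minimal usage sketch, assuming a YAML file laid out under the `discover`
# root described above (the path and profile names are illustrative only):
#
#   config = AppConfig('~/.discover.yaml')
#   profiles = config.get_profiles(None)          # default profiles, incl. 'default'
#   for obj in config.get_discoverable_objects(profiles):
#       print(obj)
#   for path in config.get_empty_dirs(profiles):
#       print(path)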
|
the-stack_106_29239 | """
==============
SGD: Penalties
==============
Contours of where the penalty is equal to 1
for the three penalties L1, L2 and elastic-net.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
line = np.linspace(-1.5, 1.5, 1001)
xx, yy = np.meshgrid(line, line)
l2 = xx ** 2 + yy ** 2
l1 = np.abs(xx) + np.abs(yy)
rho = 0.5
elastic_net = rho * l1 + (1 - rho) * l2
plt.figure(figsize=(10, 10), dpi=100)
ax = plt.gca()
elastic_net_contour = plt.contour(xx, yy, elastic_net, levels=[1],
colors=elastic_net_color)
l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color)
l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color)
ax.set_aspect("equal")
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
plt.clabel(elastic_net_contour, inline=1, fontsize=18,
fmt={1.0: 'elastic-net'}, manual=[(-1, -1)])
plt.clabel(l2_contour, inline=1, fontsize=18,
fmt={1.0: 'L2'}, manual=[(-1, -1)])
plt.clabel(l1_contour, inline=1, fontsize=18,
fmt={1.0: 'L1'}, manual=[(-1, -1)])
plt.tight_layout()
plt.show()
|
the-stack_106_29241 | # -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from keras_frcnn.RoiPoolingConv import RoiPoolingConv
def get_weight_path():
if K.image_dim_ordering() == 'th':
print('pretrained weights not available for VGG with theano backend')
return
else:
return 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'
def get_img_output_length(width, height):
def get_output_length(input_length):
return input_length//16
return get_output_length(width), get_output_length(height)
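# For example, get_img_output_length(600, 800) == (37, 50): the four 2x2
# pooling layers in the VGG base below downsample each spatial dimension by a
# factor of 16 (integer division).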
def nn_base(input_tensor=None, trainable=False):
'''
    Builds the shared VGG16 convolutional base.
    input_tensor: optional input tensor (noted in the original comment as unused and removable).
'''
# Determine proper input shape
if K.image_dim_ordering() == 'th':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
# x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
return x
def rpn(base_layers, num_anchors):
'''
    Defines the RPN (Region Proposal Network).
    x_class: for each anchor, whether it belongs to foreground or background
        (a sigmoid activation is used, so the number of output channels is num_anchors).
    x_regr: the bounding-box regression offsets for each anchor.
'''
x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv1')(base_layers)
x_class = Conv2D(num_anchors, (1, 1), activation='sigmoid', kernel_initializer='uniform', name='rpn_out_class')(x)
x_regr = Conv2D(num_anchors * 4, (1, 1), activation='linear', kernel_initializer='zero', name='rpn_out_regress')(x)
return [x_class, x_regr, base_layers]
def classifier(base_layers, input_rois, num_rois, nb_classes = 21, trainable=False):
'''
    RoiPoolingConv: returns shape (1, 32, 7, 7, 512), i.e. (batch_size, number of ROIs,
        pooled width, pooled height, feature depth).
    TimeDistributed: expects an input of at least 3 dimensions; the dimension at index 1
        (the 32 ROIs here) is treated as the "time" axis, so the wrapped layers are applied
        to each of the 32 ROI crops independently.
    out_class has shape (?, 32, 21); out_regr has shape (?, 32, 80).
'''
# compile times on theano tend to be very high, so we use smaller ROI pooling regions to workaround
if K.backend() == 'tensorflow':
pooling_regions = 7
input_shape = (num_rois,7,7,512)
elif K.backend() == 'theano':
pooling_regions = 7
input_shape = (num_rois,512,7,7)
out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
out = TimeDistributed(Dropout(0.5))(out)
out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
out = TimeDistributed(Dropout(0.5))(out)
out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
# note: no regression target for bg class
out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
return [out_class, out_regr]
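# A hedged sketch of how these builders are usually wired together into the
# RPN and detector models (num_anchors, num_rois and nb_classes are
# illustrative values, not fixed by this module):
#
#   img_input = Input(shape=(None, None, 3))
#   roi_input = Input(shape=(None, 4))
#   shared_layers = nn_base(img_input, trainable=True)
#   rpn_outputs = rpn(shared_layers, num_anchors=9)
#   cls_outputs = classifier(shared_layers, roi_input, num_rois=32, nb_classes=21)
#   model_rpn = Model(img_input, rpn_outputs[:2])
#   model_classifier = Model([img_input, roi_input], cls_outputs)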
|
the-stack_106_29242 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run performance tests locally or remotely."""
from __future__ import print_function
import argparse
import collections
import itertools
import json
import multiprocessing
import os
import pipes
import re
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
import performance.scenario_config as scenario_config
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
class QpsWorkerJob:
"""Encapsulates a qps worker server job."""
def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
self._spec = spec
self.language = language
self.host_and_port = host_and_port
self._job = None
self.perf_file_base_name = perf_file_base_name
def start(self):
self._job = jobset.Job(
self._spec, newline_on_success=True, travis=True, add_env={})
def is_running(self):
"""Polls a job and returns True if given job is still running."""
return self._job and self._job.state() == jobset._RUNNING
def kill(self):
if self._job:
self._job.kill()
self._job = None
def create_qpsworker_job(language,
shortname=None,
port=10000,
remote_host=None,
perf_cmd=None):
cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
if remote_host:
host_and_port = '%s:%s' % (remote_host, port)
else:
host_and_port = 'localhost:%s' % port
perf_file_base_name = None
if perf_cmd:
perf_file_base_name = '%s-%s' % (host_and_port, shortname)
# specify -o output file so perf.data gets collected when worker stopped
cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
] + cmdline
worker_timeout = 3 * 60
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
ssh_cmd = ['ssh']
cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
ssh_cmd.extend([
str(user_at_host),
'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
% ' '.join(cmdline)
])
cmdline = ssh_cmd
jobspec = jobset.JobSpec(
cmdline=cmdline,
shortname=shortname,
timeout_seconds=
worker_timeout, # workers get restarted after each scenario
verbose_success=True)
return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
def create_scenario_jobspec(scenario_json,
workers,
remote_host=None,
bq_result_table=None,
server_cpu_load=0):
"""Runs one scenario using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(
json.dumps({
'scenarios': [scenario_json]
}))
cmd += '--scenario_result_file=scenario_result.json '
if server_cpu_load != 0:
cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='qps_json_driver.%s' % scenario_json['name'],
timeout_seconds=12 * 60,
shell=True,
verbose_success=True)
def create_quit_jobspec(workers, remote_host=None):
"""Runs quit using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
w.host_and_port for w in workers)
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='qps_json_driver.quit',
timeout_seconds=3 * 60,
shell=True,
verbose_success=True)
def create_netperf_jobspec(server_host='localhost',
client_host=None,
bq_result_table=None):
"""Runs netperf benchmark."""
cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
if client_host:
# If netperf is running remotely, the env variables populated by Jenkins
# won't be available on the client, but we need them for uploading results
# to BigQuery.
jenkins_job_name = os.getenv('JOB_NAME')
if jenkins_job_name:
cmd += 'JOB_NAME="%s" ' % jenkins_job_name
jenkins_build_number = os.getenv('BUILD_NUMBER')
if jenkins_build_number:
cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
cmd += 'tools/run_tests/performance/run_netperf.sh'
if client_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host, pipes.quote(cmd))
return jobset.JobSpec(
cmdline=[cmd],
shortname='netperf',
timeout_seconds=60,
shell=True,
verbose_success=True)
def archive_repo(languages):
"""Archives local version of repo including submodules."""
# Directory contains symlinks that can't be correctly untarred on Windows
# so we just skip them as a workaround.
# See https://github.com/grpc/grpc/issues/16334
bad_symlinks_dir = '../grpc/third_party/libcxx/test/std/experimental/filesystem/Inputs/static_test_env'
cmdline = [
'tar', '--exclude', bad_symlinks_dir, '-cf', '../grpc.tar', '../grpc/'
]
if 'java' in languages:
cmdline.append('../grpc-java')
if 'go' in languages:
cmdline.append('../grpc-go')
if 'node' in languages or 'node_purejs' in languages:
cmdline.append('../grpc-node')
archive_job = jobset.JobSpec(
cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
jobset.message('START', 'Archiving local repository.', do_newline=True)
num_failures, _ = jobset.run(
[archive_job], newline_on_success=True, maxjobs=1)
if num_failures == 0:
jobset.message(
'SUCCESS',
'Archive with local repository created successfully.',
do_newline=True)
else:
jobset.message(
'FAILED', 'Failed to archive local repository.', do_newline=True)
sys.exit(1)
def prepare_remote_hosts(hosts, prepare_local=False):
"""Prepares remote hosts (and maybe prepare localhost as well)."""
prepare_timeout = 10 * 60
prepare_jobs = []
for host in hosts:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
prepare_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
shortname='remote_host_prepare.%s' % host,
environ={'USER_AT_HOST': user_at_host},
timeout_seconds=prepare_timeout))
if prepare_local:
# Prepare localhost as well
prepare_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/kill_workers.sh'],
shortname='local_prepare',
timeout_seconds=prepare_timeout))
jobset.message('START', 'Preparing hosts.', do_newline=True)
num_failures, _ = jobset.run(
prepare_jobs, newline_on_success=True, maxjobs=10)
if num_failures == 0:
jobset.message(
'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
else:
jobset.message(
'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
sys.exit(1)
def build_on_remote_hosts(hosts,
languages=scenario_config.LANGUAGES.keys(),
build_local=False):
"""Builds performance worker on remote hosts (and maybe also locally)."""
build_timeout = 45 * 60
# Kokoro VMs (which are local only) do not have caching, so they need more time to build
local_build_timeout = 60 * 60
build_jobs = []
for host in hosts:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
build_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
languages,
shortname='remote_host_build.%s' % host,
environ={'USER_AT_HOST': user_at_host,
'CONFIG': 'opt'},
timeout_seconds=build_timeout))
if build_local:
# Build locally as well
build_jobs.append(
jobset.JobSpec(
cmdline=['tools/run_tests/performance/build_performance.sh'] +
languages,
shortname='local_build',
environ={'CONFIG': 'opt'},
timeout_seconds=local_build_timeout))
jobset.message('START', 'Building.', do_newline=True)
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=10)
if num_failures == 0:
jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
else:
jobset.message('FAILED', 'Build failed.', do_newline=True)
sys.exit(1)
def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
"""Creates QPS workers (but does not start them)."""
if not worker_hosts:
# run two workers locally (for each language)
workers = [(None, 10000), (None, 10010)]
elif len(worker_hosts) == 1:
# run two workers on the remote host (for each language)
workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
else:
# run one worker per each remote host (for each language)
workers = [(worker_host, 10000) for worker_host in worker_hosts]
return [
create_qpsworker_job(
language,
shortname='qps_worker_%s_%s' % (language, worker_idx),
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0],
perf_cmd=perf_cmd)
for language in languages
for worker_idx, worker in enumerate(workers)
]
def perf_report_processor_job(worker_host, perf_base_name, output_filename,
flame_graph_reports):
print('Creating perf report collection job for %s' % worker_host)
cmd = ''
if worker_host != 'localhost':
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
user_at_host, output_filename, flame_graph_reports, perf_base_name)
else:
cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
output_filename, flame_graph_reports, perf_base_name)
return jobset.JobSpec(
cmdline=cmd,
timeout_seconds=3 * 60,
shell=True,
verbose_success=True,
shortname='process perf report')
Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
def create_scenarios(languages,
workers_by_lang,
remote_host=None,
regex='.*',
category='all',
bq_result_table=None,
netperf=False,
netperf_hosts=[],
server_cpu_load=0):
"""Create jobspecs for scenarios to run."""
all_workers = [
worker for workers in workers_by_lang.values() for worker in workers
]
scenarios = []
_NO_WORKERS = []
if netperf:
if not netperf_hosts:
netperf_server = 'localhost'
netperf_client = None
elif len(netperf_hosts) == 1:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[0]
else:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[1]
scenarios.append(
Scenario(
create_netperf_jobspec(
server_host=netperf_server,
client_host=netperf_client,
bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
for language in languages:
for scenario_json in language.scenarios():
if re.search(regex, scenario_json['name']):
categories = scenario_json.get('CATEGORIES',
['scalable', 'smoketest'])
if category in categories or category == 'all':
workers = workers_by_lang[str(language)][:]
# 'SERVER_LANGUAGE' is an indicator for this script to pick
                    # a server in a different language.
custom_server_lang = scenario_json.get(
'SERVER_LANGUAGE', None)
custom_client_lang = scenario_json.get(
'CLIENT_LANGUAGE', None)
scenario_json = scenario_config.remove_nonproto_fields(
scenario_json)
if custom_server_lang and custom_client_lang:
raise Exception(
'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
                            ' in the same scenario')
if custom_server_lang:
if not workers_by_lang.get(custom_server_lang, []):
print('Warning: Skipping scenario %s as' %
scenario_json['name'])
print(
'SERVER_LANGUAGE is set to %s yet the language has '
'not been selected with -l' %
custom_server_lang)
continue
for idx in range(0, scenario_json['num_servers']):
# replace first X workers by workers of a different language
workers[idx] = workers_by_lang[custom_server_lang][
idx]
if custom_client_lang:
if not workers_by_lang.get(custom_client_lang, []):
print('Warning: Skipping scenario %s as' %
scenario_json['name'])
print(
'CLIENT_LANGUAGE is set to %s yet the language has '
'not been selected with -l' %
custom_client_lang)
continue
for idx in range(scenario_json['num_servers'],
len(workers)):
# replace all client workers by workers of a different language,
# leave num_server workers as they are server workers.
workers[idx] = workers_by_lang[custom_client_lang][
idx]
scenario = Scenario(
create_scenario_jobspec(
scenario_json, [w.host_and_port for w in workers],
remote_host=remote_host,
bq_result_table=bq_result_table,
server_cpu_load=server_cpu_load), workers,
scenario_json['name'])
scenarios.append(scenario)
return scenarios
def finish_qps_workers(jobs, qpsworker_jobs):
"""Waits for given jobs to finish and eventually kills them."""
retries = 0
num_killed = 0
while any(job.is_running() for job in jobs):
for job in qpsworker_jobs:
if job.is_running():
print('QPS worker "%s" is still running.' % job.host_and_port)
if retries > 10:
print('Killing all QPS workers.')
for job in jobs:
job.kill()
num_killed += 1
retries += 1
time.sleep(3)
print('All QPS workers finished.')
return num_killed
profile_output_files = []
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name uniquifies the output name in the final
# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
flame_graph_reports):
perf_report_jobs = []
global profile_output_files
for host_and_port in hosts_and_base_names:
perf_base_name = hosts_and_base_names[host_and_port]
output_filename = '%s-%s' % (scenario_name, perf_base_name)
# from the base filename, create .svg output filename
host = host_and_port.split(':')[0]
profile_output_files.append('%s.svg' % output_filename)
perf_report_jobs.append(
perf_report_processor_job(host, perf_base_name, output_filename,
flame_graph_reports))
jobset.message(
'START', 'Collecting perf reports from qps workers', do_newline=True)
failures, _ = jobset.run(
perf_report_jobs, newline_on_success=True, maxjobs=1)
jobset.message(
'END', 'Collecting perf reports from qps workers', do_newline=True)
return failures
def main():
argp = argparse.ArgumentParser(description='Run performance tests.')
argp.add_argument(
'-l',
'--language',
choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
nargs='+',
required=True,
help='Languages to benchmark.')
argp.add_argument(
'--remote_driver_host',
default=None,
help=
'Run QPS driver on given host. By default, QPS driver is run locally.')
argp.add_argument(
'--remote_worker_host',
nargs='+',
default=[],
help='Worker hosts where to start QPS workers.')
argp.add_argument(
'--dry_run',
default=False,
action='store_const',
const=True,
help='Just list scenarios to be run, but don\'t run them.')
argp.add_argument(
'-r',
'--regex',
default='.*',
type=str,
help='Regex to select scenarios to run.')
argp.add_argument(
'--bq_result_table',
default=None,
type=str,
help='Bigquery "dataset.table" to upload results to.')
argp.add_argument(
'--category',
choices=['smoketest', 'all', 'scalable', 'sweep'],
default='all',
help='Select a category of tests to run.')
argp.add_argument(
'--netperf',
default=False,
action='store_const',
const=True,
help='Run netperf benchmark as one of the scenarios.')
argp.add_argument(
'--server_cpu_load',
default=0,
type=int,
help='Select a targeted server cpu load to run. 0 means ignore this flag'
)
argp.add_argument(
'-x',
'--xml_report',
default='report.xml',
type=str,
help='Name of XML report file to generate.')
argp.add_argument(
'--perf_args',
help=('Example usage: "--perf_args=record -F 99 -g". '
'Wrap QPS workers in a perf command '
'with the arguments to perf specified here. '
'".svg" flame graph profiles will be '
'created for each Qps Worker on each scenario. '
'Files will output to "<repo_root>/<args.flame_graph_reports>" '
'directory. Output files from running the worker '
'under perf are saved in the repo root where its ran. '
'Note that the perf "-g" flag is necessary for '
'flame graphs generation to work (assuming the binary '
'being profiled uses frame pointers, check out '
'"--call-graph dwarf" option using libunwind otherwise.) '
'Also note that the entire "--perf_args=<arg(s)>" must '
'be wrapped in quotes as in the example usage. '
'If the "--perg_args" is unspecified, "perf" will '
'not be used at all. '
'See http://www.brendangregg.com/perf.html '
'for more general perf examples.'))
argp.add_argument(
'--skip_generate_flamegraphs',
default=False,
action='store_const',
const=True,
help=('Turn flame graph generation off. '
'May be useful if "perf_args" arguments do not make sense for '
'generating flamegraphs (e.g., "--perf_args=stat ...")'))
argp.add_argument(
'-f',
'--flame_graph_reports',
default='perf_reports',
type=str,
help=
'Name of directory to output flame graph profiles to, if any are created.'
)
argp.add_argument(
'-u',
'--remote_host_username',
default='',
type=str,
help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
args = argp.parse_args()
global _REMOTE_HOST_USERNAME
if args.remote_host_username:
_REMOTE_HOST_USERNAME = args.remote_host_username
languages = set(
scenario_config.LANGUAGES[l]
for l in itertools.chain.from_iterable(
six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
for x in args.language))
# Put together set of remote hosts where to run and build
remote_hosts = set()
if args.remote_worker_host:
for host in args.remote_worker_host:
remote_hosts.add(host)
if args.remote_driver_host:
remote_hosts.add(args.remote_driver_host)
if not args.dry_run:
if remote_hosts:
archive_repo(languages=[str(l) for l in languages])
prepare_remote_hosts(remote_hosts, prepare_local=True)
else:
prepare_remote_hosts([], prepare_local=True)
build_local = False
if not args.remote_driver_host:
build_local = True
if not args.dry_run:
build_on_remote_hosts(
remote_hosts,
languages=[str(l) for l in languages],
build_local=build_local)
perf_cmd = None
if args.perf_args:
print('Running workers under perf profiler')
# Expect /usr/bin/perf to be installed here, as is usual
perf_cmd = ['/usr/bin/perf']
perf_cmd.extend(re.split('\s+', args.perf_args))
qpsworker_jobs = create_qpsworkers(
languages, args.remote_worker_host, perf_cmd=perf_cmd)
# get list of worker addresses for each language.
workers_by_lang = dict([(str(language), []) for language in languages])
for job in qpsworker_jobs:
workers_by_lang[str(job.language)].append(job)
scenarios = create_scenarios(
languages,
workers_by_lang=workers_by_lang,
remote_host=args.remote_driver_host,
regex=args.regex,
category=args.category,
bq_result_table=args.bq_result_table,
netperf=args.netperf,
netperf_hosts=args.remote_worker_host,
server_cpu_load=args.server_cpu_load)
if not scenarios:
raise Exception('No scenarios to run')
total_scenario_failures = 0
qps_workers_killed = 0
merged_resultset = {}
perf_report_failures = 0
for scenario in scenarios:
if args.dry_run:
print(scenario.name)
else:
scenario_failures = 0
try:
for worker in scenario.workers:
worker.start()
jobs = [scenario.jobspec]
if scenario.workers:
jobs.append(
create_quit_jobspec(
scenario.workers,
remote_host=args.remote_driver_host))
scenario_failures, resultset = jobset.run(
jobs, newline_on_success=True, maxjobs=1)
total_scenario_failures += scenario_failures
merged_resultset = dict(
itertools.chain(
six.iteritems(merged_resultset),
six.iteritems(resultset)))
finally:
# Consider qps workers that need to be killed as failures
qps_workers_killed += finish_qps_workers(
scenario.workers, qpsworker_jobs)
if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
workers_and_base_names = {}
for worker in scenario.workers:
if not worker.perf_file_base_name:
raise Exception(
                            'using perf but perf report filename is unspecified'
)
workers_and_base_names[
worker.host_and_port] = worker.perf_file_base_name
perf_report_failures += run_collect_perf_profile_jobs(
workers_and_base_names, scenario.name,
args.flame_graph_reports)
# Still write the index.html even if some scenarios failed.
# 'profile_output_files' will only have names for scenarios that passed
if perf_cmd and not args.skip_generate_flamegraphs:
        # write the index file to the output dir, with all profiles from all scenarios/workers
report_utils.render_perf_profiling_results(
'%s/index.html' % args.flame_graph_reports, profile_output_files)
report_utils.render_junit_xml_report(
merged_resultset, args.xml_report, suite_name='benchmarks')
if total_scenario_failures > 0 or qps_workers_killed > 0:
print('%s scenarios failed and %s qps worker jobs killed' %
(total_scenario_failures, qps_workers_killed))
sys.exit(1)
if perf_report_failures > 0:
print('%s perf profile collection jobs failed' % perf_report_failures)
sys.exit(1)
if __name__ == "__main__":
main()
|
the-stack_106_29243 | '''
Created on Apr 1, 2015
@author: bgt
'''
import unittest
import github
from GitHubDao import GitHubDao
import NonSomeFinder
class FunctionalityTest(unittest.TestCase):
def setUp(self):
self.positiveCase="sferik/twitter"
self.negativeCase="tomibgt/GitHubResearchDataMiner"
self.urlToParse="http://tucs.fi/bgt/github.html"
self.gitDao = GitHubDao()
'''
Issue #1
'''
def testPositiveTwitter(self):
self.assertTrue(self.gitDao.usesTwitter(self.positiveCase)[0], self.positiveCase+" not detected as positive case.")
def testNegativeTwitter(self):
self.assertFalse(self.gitDao.usesTwitter(self.negativeCase)[0], self.negativeCase+" not detected as negative case.")
'''
Issue #2
'''
def testUrlParsing(self):
result = self.gitDao.parseRepositoriesFromUrl(self.urlToParse)
self.assertTrue(len(result)==2, "The parser failed to detect two URLs on the test page : "+str(len(result)))
        self.assertEqual(result[0], "sferik/twitter", "Misparsed sferik/twitter : "+result[0])
        self.assertEqual(result[1], "tomibgt/GitHubResearchDataMiner", "Misparsed tomibgt/GitHubResearchDataMiner : "+result[1])
'''
Issue #3
'''
def testRepositorySearch(self):
result = self.gitDao.findRepositoryNamesWithSearchPhrase("pygithub")
count = 0
foo = False
for repo in result:
count += 1
if repo.id == 3544490:
foo = True
        self.assertGreaterEqual(count, 15, "There should be at least 15 repositories discovered with keyphrase 'pygithub'; found "+str(count))
self.assertTrue(foo, "Couldn't find the PyGithub/PyGithub project (id 3544490)")
'''
Issue #5
'''
def testAnalysisOfBlockedRepository(self):
testrepo = self.gitDao.github.get_repo("bs/starling")
testlist = [testrepo]
NonSomeFinder.analyseRepositories(testlist)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() |
the-stack_106_29244 | from complementos import *
eps = 1e-10
#1
def Leer_Datos(filename):
df = pd.read_csv(filename, sep = "\t")
np_arr = df.to_numpy()
np_arr = np_arr.T
temp = [np.ones(np_arr.shape[1]).tolist()]
for i in np_arr:
temp.append(i.tolist())
answer = np.asarray(temp)
return answer
#2
def Normalizar_Datos(np_arr):
Media, Desviacion = desviacion_estandar_2(np_arr[1:-1])
np_arr[1:-1] = (np_arr[1:-1] - Media) / Desviacion
return np_arr, Media, Desviacion
def Normalizar_Datos_MD(np_arr, Media, Desviacion):
np_arr[1:-1] = (np_arr[1:-1] - Media) / Desviacion
    return np_arr
#3
def Sigmoidal(X, theta):
return 1/(1 + np.exp(-np.dot(theta.T, X) + eps) + eps)
#4
def Calcular_Funcion_Costo(X, theta, Y):
return -np.sum(Y * np.log(Sigmoidal(X, theta) + eps) + (1 - Y) * np.log(1 - Sigmoidal(X, theta) + eps)) / Y.shape[1]
#5
def Calcular_Gradiente(X, theta, Y):
return np.sum(np.dot(X, (Sigmoidal(X, theta) - Y).T), axis = 1, keepdims = True) / Y.shape[1]
#7
def Calcular_Accuracy(X, theta, Y):
predicciones = (Sigmoidal(X, theta) >= .5).astype(int)
comparacion = (predicciones == Y).astype(float)
#print(comparacion)
unique, counts = np.unique(comparacion, return_counts = True)
dict_t = dict(zip(unique, counts))
return (dict_t[1] / comparacion.shape[1])
#6
def Gradiente_Descendiente(X, theta, Y, iteraciones = 3501, learning_rate = 0.4, step = 500):
lista_costos = []
lista_accuracy = []
lista_thetas = []
for it in range(1, iteraciones):
theta = theta - learning_rate * Calcular_Gradiente(X, theta, Y)
if it % step == 0:
lista_costos.append(Calcular_Funcion_Costo(X, theta, Y))
lista_accuracy.append(Calcular_Accuracy(X, theta, Y))
lista_thetas.append(theta)
return theta, lista_costos, lista_accuracy, lista_thetas
#8
def Crear_k_folds(np_arr, k = 3): # Only works with y = 0 or 1
unique, counts = np.unique(np_arr[-1], return_counts = True)
np_arr = np_arr.T
dict_unique = {}
for i in unique:
dict_unique[i] = []
for i in np_arr:
dict_unique[i[-1]].append(i.tolist())
dict_answer = {}
for i in range(k):
dict_answer["k" + str(i)] = []
for i in range(k - 1):
for u, c in zip(unique, counts):
for j in range(int(c / k) * i, int(c / k) * (i + 1)):
dict_answer["k" + str(i)].append(dict_unique[u][j])
for u, c in zip(unique, counts):
for j in range(int(c / k) * (k - 1), c):
dict_answer["k" + str(k - 1)].append(dict_unique[u][j])
for i in range(k):
temp = np.array(dict_answer["k" + str(i)])
np.random.shuffle(temp)
dict_answer["k" + str(i)] = temp.T
return dict_answer
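# A hedged end-to-end sketch (the file name and hyper-parameters are
# illustrative only):
#
#   datos = Leer_Datos('datos.txt')           # shape: (n_features + 2, n_samples), incl. bias row and labels
#   datos, media, desv = Normalizar_Datos(datos)
#   X, Y = datos[:-1], datos[-1:]
#   theta = np.zeros((X.shape[0], 1))
#   theta, costos, accs, thetas = Gradiente_Descendiente(X, theta, Y, learning_rate=0.4)
#   folds = Crear_k_folds(datos, k=3)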
|
the-stack_106_29245 | import os, sys
import numpy as np
import matplotlib.pyplot as plt
import functools
import argparse
import inspect
def parse(func):
"""
Quick and dirty way to make any main with optional keyword arguments parsable from the command line.
"""
@functools.wraps(func)
def wrapper(**kwargs):
# Get default kwargs
signature_kwargs = {k:v.default for k, v in inspect.signature(func).parameters.items()}
# Update default values with values of caller
signature_kwargs.update(kwargs)
# Parse kwargs
parser = argparse.ArgumentParser()
for key, value in signature_kwargs.items():
value_type = type(value)
if isinstance(value, bool):
value_type = bool_parse
parser.add_argument(f'--{key}', dest=key, default=value, type=value_type)
kwargs = vars(parser.parse_args())
# Returns the original func with new kwargs
return func(**kwargs)
return wrapper
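# A hedged usage sketch: decorating a `main` exposes its keyword defaults as
# command-line flags (the parameter names below are illustrative):
#
#   @parse
#   def main(epochs=10, lr=0.01, verbose=True):
#       print(epochs, lr, verbose)
#
#   # `python script.py --epochs 20 --verbose false` would then override the
#   # defaults before `main()` runs.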
def bool_parse(arg):
if arg.lower() in ('true', 't', 'yes', 'y', '1'):
return True
elif arg.lower() in ('false', 'f', 'no', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def identity_func(arg):
return arg
def identity_method(self, arg):
return arg
def compute_subplots_shape(N, aspect_ratio=9/16):
"""
Returns the shape (n, m) of the subplots that will fit N images with respect to the given aspect_ratio.
"""
if aspect_ratio == 0:
return N, 1
n = int(np.sqrt(aspect_ratio*N))
m = int(np.sqrt(1/aspect_ratio*N))
while m*n < N:
if n/m <= aspect_ratio:
n += 1
else:
m += 1
return n, m
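# For example, compute_subplots_shape(10) returns (3, 4) with the default
# 9/16 aspect ratio: the initial 2x4 estimate is grown until n*m >= N.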
def make_fig_axes(N, aspect_ratio=9/16):
n, m = compute_subplots_shape(N)
fig, axes = plt.subplots(n, m)
# Reshaping axes
if n == 1 and m == 1:
axes = [[axes]]
elif n == 1 or m == 1:
axes = [axes]
axes = [ax for line_axes in axes for ax in line_axes]
for ax in axes[N:]:
ax.axis('off')
return fig, axes[:N]
def split_int(n, k):
"""
Equivalent of numpy 'array_split' function, but for integers instead of arrays.
Returns n%k tuples of integers with difference equal to (n//k) + 1 and k - n%k tuples of integers with difference equal to n//k.
"""
idx0, idx1 = 0, 0
for i in range(k):
idx0 = idx1
idx1 = idx1 + n//k
if i < n%k:
idx1 += 1
yield (idx0, idx1)
if __name__ == '__main__':
print([i for i in split_int(10,3)])
|
the-stack_106_29246 | import pyb
def test_irq():
# test basic disable/enable
i1 = pyb.disable_irq()
print(i1)
pyb.enable_irq() # by default should enable IRQ
# check that interrupts are enabled by waiting for ticks
pyb.delay(10)
# check nested disable/enable
i1 = pyb.disable_irq()
i2 = pyb.disable_irq()
print(i1, i2)
pyb.enable_irq(i2)
pyb.enable_irq(i1)
# check that interrupts are enabled by waiting for ticks
pyb.delay(10)
test_irq()
|
the-stack_106_29247 | # -*- coding: utf-8 -*-
"""Implements classes for generating data by schema."""
from typing import Any, Callable, Final, Iterator, List, Optional, Sequence
from mimesis.exceptions import FieldError, SchemaError
from mimesis.locales import Locale
from mimesis.providers.generic import Generic
from mimesis.typing import JSON, SchemaType, Seed
__all__ = ["BaseField", "Field", "Schema"]
class BaseField:
"""
BaseField is a class for generating data by the name of the method.
Instance of this object takes any string which represents the name
of any method of any supported data provider (:class:`~mimesis.Generic`)
and the ``**kwargs`` of the method.
See :class:`~mimesis.schema.BaseField.perform` for more details.
"""
class Meta:
base = True
def __init__(
self,
locale: Locale = Locale.DEFAULT,
seed: Optional[Seed] = None,
providers: Optional[Sequence[Any]] = None,
) -> None:
"""Initialize field.
:param locale: Locale
:param seed: Seed for random.
"""
self._gen = Generic(locale, seed)
if providers:
self._gen.add_providers(*providers)
self._table = {} # type: ignore
def perform(
self,
name: Optional[str] = None,
key: Optional[Callable[[Any], Any]] = None,
**kwargs: Any
) -> Any:
"""Performs the value of the field by its name.
It takes any string which represents the name of any method of
any supported data provider and the ``**kwargs`` of this method.
.. note:: Some data providers have methods with the same names
and in such cases, you can explicitly define that the method
belongs to data-provider ``name='provider.name'`` otherwise
it will return the data from the first provider which
has a method ``name``.
You can apply a *key function* to the result returned by
        the method, by passing a parameter **key** with a callable
object which returns the final result.
:param name: Name of the method.
:param key: A key function (or any other callable object)
which will be applied to result.
:param kwargs: Kwargs of method.
:return: Value which represented by method.
:raises ValueError: if provider not
supported or if field not defined.
"""
if name is None:
raise FieldError()
def tail_parser(tails: str, obj: Any) -> Any:
"""Return method from end of tail.
:param tails: Tail string
:param obj: Search tail from this object
            :return: the method at the end of the tail
"""
provider_name, method_name = tails.split(".", 1)
if "." in method_name:
raise FieldError(name)
attr = getattr(obj, provider_name)
if attr is not None:
try:
return getattr(attr, method_name)
except AttributeError:
raise FieldError(name)
try:
if name not in self._table:
if "." not in name:
# Fix https://github.com/lk-geimfari/mimesis/issues/619
if name == self._gen.choice.Meta.name:
self._table[name] = self._gen.choice
else:
for provider in dir(self._gen):
provider = getattr(self._gen, provider)
if name in dir(provider):
self._table[name] = getattr(provider, name)
else:
self._table[name] = tail_parser(name, self._gen)
result = self._table[name](**kwargs)
if key and callable(key):
return key(result)
return result
except KeyError:
raise FieldError(name)
def __str__(self) -> str:
return "{} <{}>".format(self.__class__.__name__, self._gen.locale)
class Field(BaseField):
"""Greedy field.
    The field which evaluates immediately.
Example:
>>> _ = Field()
>>> _('username')
Dogtag_1836
"""
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self.perform(*args, **kwargs)
class Schema:
"""Class which return list of filled schemas."""
_MIN_ITERATIONS_VALUE: Final = 1
__slots__ = ("_schema",)
def __init__(self, schema: SchemaType) -> None:
"""Initialize schema.
:param schema: A schema (must be a callable object).
"""
if schema and callable(schema):
self._schema = schema
else:
raise SchemaError()
def create(self, iterations: int = 1) -> List[JSON]:
"""Creates a list of a fulfilled schemas.
.. note::
This method evaluates immediately, so be careful on creating
large datasets otherwise you're risking running out of memory.
If you need a lazy version of this method, see
:meth:`iterator`
:param iterations: Number of iterations.
:return: List of fulfilled schemas.
"""
if iterations < self._MIN_ITERATIONS_VALUE:
raise ValueError("The number of iterations must be greater than 0.")
return [self._schema() for _ in range(iterations)]
def iterator(self, iterations: int = 1) -> Iterator[JSON]:
"""Fulfills schema in a lazy way.
:param iterations: Number of iterations.
:return: List of fulfilled schemas.
"""
if iterations < self._MIN_ITERATIONS_VALUE:
raise ValueError("The number of iterations must be greater than 0.")
for item in range(iterations):
yield self._schema()
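# A hedged usage sketch (the field names below are regular provider methods;
# the generated values are of course random):
#
#   _ = Field(Locale.EN)
#   schema = Schema(schema=lambda: {
#       'uid': _('uuid'),
#       'name': _('person.full_name'),
#       'email': _('person.email', key=str.lower),
#   })
#   data = schema.create(iterations=3)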
|
the-stack_106_29248 | import click
from amusement import Parks
@click.command()
@click.argument('name', nargs=1, type=click.Choice(Parks.keys()))
@click.option('--type', type=click.Choice(['rides', 'shows']), prompt='Please choose rides or shows')
def cli(name, type):
park = Parks[name]
if type == 'rides':
print_rides(park.rides())
if type == 'shows':
print(park.shows())
def print_rides(ride_array):
longest_name = max(len(key['name']) for key in ride_array)
one_closed = False in [key['isOpen'] for key in ride_array]
for ride in ride_array:
line = ''
line += ride['name'] + ' ' * (longest_name - len(ride['name']))
line += ' ' * 3
if ride['isOpen'] is True:
line += 'Open'
# make sure the times are aligned b/c closed longer than open
if one_closed:
line += ' ' * 2
else:
line += 'Closed'
line += ' ' * 5
line += str(ride['wait']) + ' mins'
click.echo(line)
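# Example invocation sketch (the park name is illustrative; valid names are
# the keys of amusement.Parks):
#
#   $ python thisscript.py Hersheypark --type rides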
if __name__ == "__main__":
cli()
|
the-stack_106_29249 | from typing import Any, Dict, List, Optional, Tuple
from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
import numpy as np
from sklearn.base import ClassifierMixin
from autoPyTorch.pipeline.base_pipeline import BasePipeline
from autoPyTorch.pipeline.components.base_choice import autoPyTorchChoice
from autoPyTorch.pipeline.components.preprocessing.image_preprocessing.normalise.base_normalizer_choice import (
NormalizerChoice
)
from autoPyTorch.pipeline.components.setup.augmentation.image.ImageAugmenter import ImageAugmenter
from autoPyTorch.pipeline.components.setup.early_preprocessor.EarlyPreprocessing import EarlyPreprocessing
# from autoPyTorch.pipeline.components.setup.lr_scheduler.base_scheduler_choice import SchedulerChoice
# from autoPyTorch.pipeline.components.setup.network.base_network_choice import NetworkChoice
# from autoPyTorch.pipeline.components.setup.optimizer.base_optimizer_choice import OptimizerChoice
# from autoPyTorch.pipeline.components.setup.network_initializer.base_network_init_choice import (
# NetworkInitializerChoice
# )
class ImageClassificationPipeline(ClassifierMixin, BasePipeline):
"""This class is a proof of concept to integrate AutoSklearn Components
It implements a pipeline, which includes as steps:
->One preprocessing step
->One neural network
Contrary to the sklearn API it is not possible to enumerate the
possible parameters in the __init__ function because we only know the
available classifiers at runtime. For this reason the user must
    specify the parameters by passing an instance of
ConfigSpace.configuration_space.Configuration.
Args:
config (Configuration)
The configuration to evaluate.
        random_state (Optional[RandomState]): random_state is the random number generator
Attributes:
Examples
"""
def __init__(
self,
config: Optional[Configuration] = None,
steps: Optional[List[Tuple[str, autoPyTorchChoice]]] = None,
dataset_properties: Optional[Dict[str, Any]] = None,
include: Optional[Dict[str, Any]] = None,
exclude: Optional[Dict[str, Any]] = None,
random_state: Optional[np.random.RandomState] = None,
init_params: Optional[Dict[str, Any]] = None
):
super().__init__(
config, steps, dataset_properties, include, exclude,
random_state, init_params)
def fit_transformer(
self,
X: np.ndarray,
y: np.ndarray,
fit_params: Optional[Dict[str, Any]] = None
) -> Tuple[np.ndarray, Optional[Dict[str, Any]]]:
"""Fits the pipeline given a training (X,y) pair
Args:
X (np.ndarray): features from which to guess targets
y (np.ndarray): classification targets for this task
            fit_params (Optional[Dict[str, Any]]): handy communication dictionary,
so that inter-stages of the pipeline can share information
Returns:
np.ndarray: the transformed features
            Optional[Dict[str, Any]]: A dictionary to share fit information
within the pipeline stages
"""
if fit_params is None:
fit_params = {}
X, fit_params = super().fit_transformer(
X, y, fit_params=fit_params)
return X, fit_params
def predict_proba(self, X: np.ndarray, batch_size: Optional[int] = None) -> np.ndarray:
"""predict_proba.
Args:
X (np.ndarray): input to the pipeline, from which to guess targets
batch_size (Optional[int]): batch_size controls whether the pipeline
will be called on small chunks of the data. Useful when calling the
predict method on the whole array X results in a MemoryError.
Returns:
np.ndarray: Probabilities of the target being certain class
"""
if batch_size is None:
return super().predict_proba(X)
else:
if not isinstance(batch_size, int):
raise ValueError("Argument 'batch_size' must be of type int, "
"but is '%s'" % type(batch_size))
if batch_size <= 0:
raise ValueError("Argument 'batch_size' must be positive, "
"but is %d" % batch_size)
else:
# Probe for the target array dimensions
target = self.predict_proba(X[0:2].copy())
y = np.zeros((X.shape[0], target.shape[1]),
dtype=np.float32)
for k in range(max(1, int(np.ceil(float(X.shape[0]) / batch_size)))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size, X.shape[0]])
pred_prob = self.predict_proba(X[batch_from:batch_to], batch_size=None)
y[batch_from:batch_to] = pred_prob.astype(np.float32)
return y
def _get_hyperparameter_search_space(self,
dataset_properties: Dict[str, Any],
include: Optional[Dict[str, Any]] = None,
exclude: Optional[Dict[str, Any]] = None,
) -> ConfigurationSpace:
"""Create the hyperparameter configuration space.
For the given steps, and the Choices within that steps,
this procedure returns a configuration space object to
explore.
Args:
include (Optional[Dict[str, Any]]): what hyper-parameter configurations
to honor when creating the configuration space
exclude (Optional[Dict[str, Any]]): what hyper-parameter configurations
to remove from the configuration space
            dataset_properties (Optional[Dict[str, Union[str, int]]]): Characteristics
of the dataset to guide the pipeline choices of components
Returns:
cs (Configuration): The configuration space describing
                the image classification pipeline.
"""
cs = ConfigurationSpace()
if dataset_properties is None or not isinstance(dataset_properties, dict):
dataset_properties = dict()
if 'target_type' not in dataset_properties:
dataset_properties['target_type'] = 'image_classification'
if dataset_properties['target_type'] != 'image_classification':
dataset_properties['target_type'] = 'image_classification'
# get the base search space given this
# dataset properties. Then overwrite with custom
# classification requirements
cs = self._get_base_search_space(
cs=cs, dataset_properties=dataset_properties,
exclude=exclude, include=include, pipeline=self.steps)
        # Here we could add custom constraints, e.g. to mark certain
        # hyperparameter combinations as invalid configurations
self.configuration_space = cs
self.dataset_properties = dataset_properties
return cs
def _get_pipeline_steps(self, dataset_properties: Optional[Dict[str, Any]],
) -> List[Tuple[str, autoPyTorchChoice]]:
"""
Defines what steps a pipeline should follow.
The step itself has choices given via autoPyTorchChoice.
Returns:
List[Tuple[str, autoPyTorchChoice]]: list of steps sequentially exercised
by the pipeline.
"""
steps = [] # type: List[Tuple[str, autoPyTorchChoice]]
default_dataset_properties = {'target_type': 'image_classification'}
if dataset_properties is not None:
default_dataset_properties.update(dataset_properties)
steps.extend([
("normalizer", NormalizerChoice(default_dataset_properties)),
("preprocessing", EarlyPreprocessing()),
("image_augmenter", ImageAugmenter())
# ("network", NetworkChoice(default_dataset_properties)),
# ("network_init", NetworkInitializerChoice(default_dataset_properties)),
# ("optimizer", OptimizerChoice(default_dataset_properties)),
# ("lr_scheduler", SchedulerChoice(default_dataset_properties)),
])
return steps
def _get_estimator_hyperparameter_name(self) -> str:
"""
Returns the name of the current estimator.
Returns:
str: name of the pipeline type
"""
return "image_classifier"
|
the-stack_106_29253 | # -*- coding: utf-8 -*-
import tempfile
import threading
from urllib.parse import quote
import json
import datetime
import time
import math
import re
import sys
import os
try:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
except ImportError as ie:
print(ie)
sys.exit('You can install missing modules with `pip3 install [modulename]`')
from GoogleScraper.scraping import SearchEngineScrape, SeleniumSearchError, get_base_search_url_by_search_engine, MaliciousRequestDetected
from GoogleScraper.user_agents import random_user_agent
import logging
logger = logging.getLogger(__name__)
def get_selenium_scraper_by_search_engine_name(config, search_engine_name, *args, **kwargs):
"""Get the appropriate selenium scraper for the given search engine name.
Args:
search_engine_name: The search engine name.
args: The arguments for the target search engine instance creation.
kwargs: The keyword arguments for the target search engine instance creation.
Returns;
Either a concrete SelScrape instance specific for the given search engine or the abstract SelScrape object.
"""
class_name = search_engine_name[0].upper() + search_engine_name[1:].lower() + 'SelScrape'
ns = globals()
if class_name in ns:
return ns[class_name](config, *args, **kwargs)
return SelScrape(config, *args, **kwargs)
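# A hedged usage sketch of the dispatch above (extra positional/keyword
# arguments are passed straight through to the scraper constructor):
#
#   scraper = get_selenium_scraper_by_search_engine_name(config, 'google', ...)
#
# This returns a GoogleSelScrape instance when such a subclass is defined in
# this module, and falls back to the generic SelScrape otherwise.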
class SelScrape(SearchEngineScrape, threading.Thread):
"""Instances of this class make use of selenium browser objects to query the search engines on a high level.
"""
next_page_selectors = {
'google': '#pnnext',
'yandex': '.pager__button_kind_next',
'bing': '.sb_pagN',
'yahoo': '#pg-next',
'baidu': '.n',
'ask': '#paging div a.txt3.l_nu',
'blekko': '',
'duckduckgo': '',
'googleimg': '#pnnext',
'baiduimg': '.n',
}
input_field_selectors = {
'google': (By.NAME, 'q'),
'yandex': (By.NAME, 'text'),
'bing': (By.NAME, 'q'),
'yahoo': (By.NAME, 'p'),
'baidu': (By.NAME, 'wd'),
'duckduckgo': (By.NAME, 'q'),
'ask': (By.NAME, 'q'),
'blekko': (By.NAME, 'q'),
'googleimg': (By.NAME, 'as_q'),
'baiduimg': (By.NAME, 'word'),
}
param_field_selectors = {
'googleimg': {
'image_type': (By.ID, 'imgtype_input'),
'image_size': (By.ID, 'imgsz_input'),
},
}
search_params = {
'googleimg': {
'image_type': None,
'image_size': None,
},
}
normal_search_locations = {
'google': 'https://www.google.com/',
'yandex': 'http://www.yandex.ru/',
'bing': 'http://www.bing.com/',
'yahoo': 'https://yahoo.com/',
'baidu': 'http://baidu.com/',
'duckduckgo': 'https://duckduckgo.com/',
'ask': 'http://ask.com/',
'blekko': 'http://blekko.com/',
}
image_search_locations = {
'google': 'https://www.google.com/imghp',
'yandex': 'http://yandex.ru/images/',
'bing': 'https://www.bing.com/?scope=images',
'yahoo': 'http://images.yahoo.com/',
'baidu': 'http://image.baidu.com/',
        'duckduckgo': None,  # duckduckgo doesn't support direct image search
'ask': 'http://www.ask.com/pictures/',
'blekko': None,
'googleimg':'https://www.google.com/advanced_image_search',
'baiduimg': 'http://image.baidu.com/',
}
def __init__(self, config, *args, captcha_lock=None, browser_num=1, **kwargs):
"""Create a new SelScraper thread Instance.
Args:
captcha_lock: To sync captcha solving (stdin)
            proxy: Optional, if set, use the proxy to route all scraping through it.
browser_num: A unique, semantic number for each thread.
"""
self.search_input = None
threading.Thread.__init__(self)
SearchEngineScrape.__init__(self, config, *args, **kwargs)
self.browser_type = self.config.get('sel_browser', 'chrome').lower()
self.browser_num = browser_num
self.captcha_lock = captcha_lock
self.scrape_method = 'selenium'
self.xvfb_display = self.config.get('xvfb_display', None)
self.search_param_values = self._get_search_param_values()
# get the base search url based on the search engine.
self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, self.scrape_method)
super().instance_creation_info(self.__class__.__name__)
def set_proxy(self):
"""Install a proxy on the communication channel."""
def switch_proxy(self, proxy):
"""Switch the proxy on the communication channel."""
def proxy_check(self, proxy):
assert self.proxy and self.webdriver, 'Scraper instance needs valid webdriver and proxy instance to make the proxy check'
online = False
status = 'Proxy check failed: {host}:{port} is not used while requesting'.format(**self.proxy.__dict__)
ipinfo = {}
try:
self.webdriver.get(self.config.get('proxy_info_url'))
try:
text = re.search(r'(\{.*?\})', self.webdriver.page_source, flags=re.DOTALL).group(0)
ipinfo = json.loads(text)
except ValueError as v:
logger.critical(v)
except Exception as e:
status = str(e)
if 'ip' in ipinfo and ipinfo['ip']:
online = True
status = 'Proxy is working.'
else:
logger.warning(status)
super().update_proxy_status(status, ipinfo, online)
return online
def _save_debug_screenshot(self):
"""
Saves a debug screenshot of the browser window to figure
out what went wrong.
"""
tempdir = tempfile.gettempdir()
location = os.path.join(tempdir, '{}_{}_debug_screenshot.png'.format(self.search_engine_name, self.browser_type))
self.webdriver.get_screenshot_as_file(location)
def _set_xvfb_display(self):
# TODO: should we check the format of the config?
if self.xvfb_display:
os.environ['DISPLAY'] = self.xvfb_display
def _get_webdriver(self):
"""Return a webdriver instance and set it up with the according profile/ proxies.
Chrome is quite fast, but not as stealthy as PhantomJS.
Returns:
The appropriate webdriver mode according to self.browser_type. If no webdriver mode
could be found, return False.
"""
if self.browser_type == 'chrome':
return self._get_Chrome()
elif self.browser_type == 'firefox':
return self._get_Firefox()
elif self.browser_type == 'phantomjs':
return self._get_PhantomJS()
return False
def _get_Chrome(self):
try:
if self.proxy:
chrome_ops = webdriver.ChromeOptions()
chrome_ops.add_argument(
'--proxy-server={}://{}:{}'.format(self.proxy.proto, self.proxy.host, self.proxy.port))
self.webdriver = webdriver.Chrome(chrome_options=chrome_ops)
else:
self.webdriver = webdriver.Chrome()#service_log_path='/tmp/chromedriver_log.log')
return True
except WebDriverException as e:
# we don't have a chrome executable or a chrome webdriver installed
raise
return False
def _get_Firefox(self):
try:
if self.proxy:
profile = webdriver.FirefoxProfile()
profile.set_preference("network.proxy.type",
1) # this means that the proxy is user set, regardless of the type
if self.proxy.proto.lower().startswith('socks'):
profile.set_preference("network.proxy.socks", self.proxy.host)
profile.set_preference("network.proxy.socks_port", self.proxy.port)
profile.set_preference("network.proxy.socks_version", 5 if self.proxy.proto[-1] == '5' else 4)
profile.update_preferences()
elif self.proxy.proto == 'http':
profile.set_preference("network.proxy.http", self.proxy.host)
profile.set_preference("network.proxy.http_port", self.proxy.port)
else:
raise ValueError('Invalid protocol given in proxyfile.')
profile.update_preferences()
self.webdriver = webdriver.Firefox(firefox_profile=profile)
else:
self.webdriver = webdriver.Firefox()
return True
except WebDriverException as e:
# reaching here is bad, since we have no available webdriver instance.
logger.error(e)
return False
def _get_PhantomJS(self):
try:
service_args = []
if self.proxy:
service_args.extend([
'--proxy={}:{}'.format(self.proxy.host, self.proxy.port),
'--proxy-type={}'.format(self.proxy.proto),
])
if self.proxy.username and self.proxy.password:
service_args.append(
'--proxy-auth={}:{}'.format(self.proxy.username, self.proxy.password)
)
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = random_user_agent(only_desktop=True)
self.webdriver = webdriver.PhantomJS(service_args=service_args, desired_capabilities=dcap)
return True
except WebDriverException as e:
logger.error(e)
return False
def handle_request_denied(self, status_code):
"""Checks whether Google detected a potentially harmful request.
Whenever such potential abuse is detected, Google shows a captcha.
This method just blocks until someone has entered the captcha in the browser window.
When the window is not visible (for example when using PhantomJS), this method
saves a png screenshot of the page and shows it to the user, who should enter the
solution on the command line.
Returns:
The search input field.
Raises:
MaliciousRequestDetected when there was no way to stop Google from denying our requests.
"""
# selenium webdriver objects have no status code :/
super().handle_request_denied('400')
needles = self.malicious_request_needles[self.search_engine_name]
if needles and needles['inurl'] in self.webdriver.current_url \
and needles['inhtml'] in self.webdriver.page_source:
if self.config.get('manual_captcha_solving', False):
with self.captcha_lock:
import tempfile
tf = tempfile.NamedTemporaryFile('wb')
tf.write(self.webdriver.get_screenshot_as_png())
import webbrowser
webbrowser.open('file://{}'.format(tf.name))
solution = input('enter the captcha please...')
self.webdriver.find_element_by_name('submit').send_keys(solution + Keys.ENTER)
try:
self.search_input = WebDriverWait(self.webdriver, 5).until(
EC.visibility_of_element_located(self._get_search_input_field()))
except TimeoutException:
raise MaliciousRequestDetected('Requesting with this ip is not possible at the moment.')
tf.close()
else:
# Just wait until the user solves the captcha in the browser window
# 10 hours if needed :D
logger.info('Waiting for user to solve captcha')
return self._wait_until_search_input_field_appears(10 * 60 * 60)
def build_search(self):
"""Build the search for SelScrapers"""
assert self.webdriver, 'Webdriver needs to be ready to build the search'
if self.config.get('search_type', 'normal') == 'image':
starting_point = self.image_search_locations[self.search_engine_name]
else:
starting_point = self.base_search_url
self.webdriver.get(starting_point)
def _get_search_param_values(self):
search_param_values = {}
if self.search_engine_name in self.search_params:
for param_key in self.search_params[self.search_engine_name]:
cfg = self.config.get(param_key, None)
if cfg:
search_param_values[param_key] = cfg
return search_param_values
def _get_search_input_field(self):
"""Get the search input field for the current search_engine.
Returns:
A tuple to locate the search field as used by seleniums function presence_of_element_located()
"""
return self.input_field_selectors[self.search_engine_name]
def _get_search_param_fields(self):
if self.search_engine_name in self.param_field_selectors:
return self.param_field_selectors[self.search_engine_name]
else:
return {}
def _wait_until_search_input_field_appears(self, max_wait=5):
"""Waits until the search input field can be located for the current search engine
Args:
max_wait: How long to wait maximally before returning False.
Returns: False if the search input field could not be located within the time
or the handle to the search input field.
"""
def find_visible_search_input(driver):
input_field = driver.find_element(*self._get_search_input_field())
return input_field
try:
search_input = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_input)
return search_input
except TimeoutException as e:
logger.error('{}: TimeoutException waiting for search input field: {}'.format(self.name, e))
return False
def _wait_until_search_param_fields_appears(self, max_wait=5):
"""Waits until the search input field contains the query.
Args:
max_wait: How long to wait maximally before returning False.
"""
def find_visible_search_param(driver):
for param, field in self._get_search_param_fields().items():
input_field = driver.find_element(*field)
if not input_field:
return False
return True
try:
fields = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_param)
return fields
except TimeoutException as e:
logger.error('{}: TimeoutException waiting for search param field: {}'.format(self.name, e))
return False
def _goto_next_page(self):
"""
Click the next page element,
Returns:
The url of the next page or False if there is no such url
(end of available pages for instance).
"""
next_url = ''
element = self._find_next_page_element()
if hasattr(element, 'click'):
next_url = element.get_attribute('href')
try:
element.click()
except WebDriverException:
# See http://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error
# first move the mouse to the next element; sometimes the element is not visible, e.g. on blekko.com
selector = self.next_page_selectors[self.search_engine_name]
if selector:
try:
next_element = WebDriverWait(self.webdriver, 5).until(
EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
webdriver.ActionChains(self.webdriver).move_to_element(next_element).perform()
# wait until the next page link emerges
WebDriverWait(self.webdriver, 8).until(
EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
element = self.webdriver.find_element_by_css_selector(selector)
next_url = element.get_attribute('href')
element.click()
except WebDriverException:
pass
# wait until the next page was loaded
if not next_url:
return False
else:
return next_url
def _find_next_page_element(self):
"""Finds the element that locates the next page for any search engine.
Returns:
The element that needs to be clicked to get to the next page or a boolean value to
indicate an error condition.
"""
if self.search_type == 'normal':
selector = self.next_page_selectors[self.search_engine_name]
try:
# wait until the next page link is clickable
WebDriverWait(self.webdriver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
except (WebDriverException, TimeoutException) as e:
self._save_debug_screenshot()
raise Exception('{}: Cannot locate next page element: {}'.format(self.name, str(e)))
return self.webdriver.find_element_by_css_selector(selector)
elif self.search_type == 'image':
self.page_down()
return True
def wait_until_serp_loaded(self):
"""
This method tries to wait until the page requested is loaded.
We know that the correct page is loaded when self.page_number appears
in the navigation of the page.
"""
if self.search_type == 'normal':
if self.search_engine_name == 'google':
selector = '#navcnt td.cur'
elif self.search_engine_name == 'yandex':
selector = '.pager__item_current_yes font font'
elif self.search_engine_name == 'bing':
selector = 'nav li a.sb_pagS'
elif self.search_engine_name == 'yahoo':
selector = '.compPagination strong'
elif self.search_engine_name == 'baidu':
selector = '#page .fk_cur + .pc'
elif self.search_engine_name == 'duckduckgo':
# no pagination in duckduckgo
pass
elif self.search_engine_name == 'ask':
selector = '#paging .pgcsel .pg'
if self.search_engine_name == 'duckduckgo':
time.sleep(1.5)
else:
try:
WebDriverWait(self.webdriver, 5).\
until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, selector), str(self.page_number)))
except TimeoutException as e:
self._save_debug_screenshot()
content = self.webdriver.find_element_by_css_selector(selector).text
raise Exception('Pagenumber={} did not appear in navigation. Got "{}" instead'\
.format(self.page_number, content))
elif self.search_type == 'image':
self.wait_until_title_contains_keyword()
else:
self.wait_until_title_contains_keyword()
def wait_until_title_contains_keyword(self):
try:
WebDriverWait(self.webdriver, 5).until(EC.title_contains(self.query))
except TimeoutException:
logger.debug(SeleniumSearchError(
'{}: Keyword "{}" not found in title: {}'.format(self.name, self.query, self.webdriver.title)))
def search(self):
"""Search with webdriver.
Fills out the search form of the search engine for each keyword.
Clicks the next link while pages_per_keyword is not reached.
"""
for self.query, self.pages_per_keyword in self.jobs.items():
self.search_input = self._wait_until_search_input_field_appears()
if self.search_input is False and self.config.get('stop_on_detection'):
self.status = 'Malicious request detected'
return
if self.search_input is False:
# @todo: pass status_code
self.search_input = self.handle_request_denied()
if self.search_input:
self.search_input.clear()
time.sleep(.25)
self.search_param_fields = self._get_search_param_fields()
if self.search_param_fields:
wait_res = self._wait_until_search_param_fields_appears()
if wait_res is False:
raise Exception('Timed out waiting for the search param input fields')
for param, field in self.search_param_fields.items():
if field[0] == By.ID:
js_tpl = '''
var field = document.getElementById("%s");
field.setAttribute("value", "%s");
'''
elif field[0] == By.NAME:
js_tpl = '''
var fields = document.getElementsByName("%s");
for (var f in fields) {
f.setAttribute("value", "%s");
}
'''
js_str = js_tpl % (field[1], self.search_param_values[param])
self.webdriver.execute_script(js_str)
try:
self.search_input.send_keys(self.query + Keys.ENTER)
except ElementNotVisibleException:
time.sleep(2)
self.search_input.send_keys(self.query + Keys.ENTER)
self.requested_at = datetime.datetime.utcnow()
else:
logger.debug('{}: Cannot get handle to the input form for keyword {}.'.format(self.name, self.query))
continue
super().detection_prevention_sleep()
super().keyword_info()
for self.page_number in self.pages_per_keyword:
self.wait_until_serp_loaded()
try:
self.html = self.webdriver.execute_script('return document.body.innerHTML;')
except WebDriverException as e:
self.html = self.webdriver.page_source
super().after_search()
# Click the next page link, but not when we are about to leave the loop
# in the next iteration.
if self.page_number in self.pages_per_keyword:
next_url = self._goto_next_page()
self.requested_at = datetime.datetime.utcnow()
if not next_url:
break
def page_down(self):
"""Scrolls down a page with javascript.
Used for next page in image search mode or when the
next results are obtained by scrolling down a page.
"""
js = '''
var w = window,
d = document,
e = d.documentElement,
g = d.getElementsByTagName('body')[0],
y = w.innerHeight|| e.clientHeight|| g.clientHeight;
window.scrollBy(0,y);
return y;
'''
self.webdriver.execute_script(js)
def run(self):
"""Run the SelScraper."""
self._set_xvfb_display()
if not self._get_webdriver():
raise Exception('{}: Aborting due to no available selenium webdriver.'.format(self.name))
try:
self.webdriver.set_window_size(400, 400)
self.webdriver.set_window_position(400 * (self.browser_num % 4), 400 * (math.floor(self.browser_num // 4)))
except WebDriverException as e:
logger.debug('Cannot set window size: {}'.format(e))
super().before_search()
if self.startable:
self.build_search()
self.search()
if self.webdriver:
self.webdriver.quit()
"""
For most search engines, the normal SelScrape works perfectly, but sometimes
the scraping logic is different for other search engines.
Duckduckgo loads new results on the fly (via ajax) and doesn't support any "next page"
link. Other search engines like gekko.com have a completely different SERP page format.
That's why we need to inherit from SelScrape for specific logic that only applies to the given
search engine.
The following functionality may differ in particular:
- _goto_next_page()
- _get_search_input_field()
- _wait_until_search_input_field_appears()
- handle_request_denied()
- wait_until_serp_loaded()
"""
class DuckduckgoSelScrape(SelScrape):
"""
Duckduckgo is a little special since new results are obtained via ajax;
going to the next page thus means scrolling down.
Furthermore duckduckgo.com doesn't seem to work with PhantomJS. Maybe they block it,
but I don't know how: it cannot be the User-Agent, because I already tried that.
"""
def __init__(self, *args, **kwargs):
SelScrape.__init__(self, *args, **kwargs)
self.largest_id = 0
def _goto_next_page(self):
super().page_down()
return 'No more results' not in self.html
def wait_until_serp_loaded(self):
super()._wait_until_search_input_field_appears()
class BlekkoSelScrape(SelScrape):
def __init__(self, *args, **kwargs):
SelScrape.__init__(self, *args, **kwargs)
def _goto_next_page(self):
pass
class AskSelScrape(SelScrape):
def __init__(self, *args, **kwargs):
SelScrape.__init__(self, *args, **kwargs)
def wait_until_serp_loaded(self):
def wait_until_keyword_in_url(driver):
try:
return quote(self.query) in driver.current_url or \
self.query.replace(' ', '+') in driver.current_url
except WebDriverException:
pass
WebDriverWait(self.webdriver, 5).until(wait_until_keyword_in_url)
|
the-stack_106_29254 | from collections import OrderedDict, defaultdict
from copy import deepcopy
from typing import Any, Dict
import pytest
from zulipterminal.config.keys import keys_for_command
from zulipterminal.helper import initial_index as helper_initial_index
from zulipterminal.ui_tools.boxes import MessageBox
from zulipterminal.ui_tools.buttons import StreamButton, UserButton
from zulipterminal.version import (
MINIMUM_SUPPORTED_SERVER_VERSION,
SUPPORTED_SERVER_VERSIONS,
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""
Forces all the tests to work offline.
"""
monkeypatch.delattr("requests.sessions.Session.request")
@pytest.fixture(autouse=True)
def no_asynch(mocker):
"""
Make all function calls synchronous.
"""
mocker.patch("zulipterminal.helper.asynch")
# --------------- Controller Fixtures -----------------------------------------
@pytest.fixture
def stream_button(mocker):
"""
Mocked stream button.
"""
view_mock = mocker.Mock()
view_mock.palette = [(None, "black", "white")]
button = StreamButton(
properties={
"name": "PTEST",
"id": 205,
"color": "#bfd56f",
"invite_only": False,
"description": "Test stream description",
},
controller=mocker.patch("zulipterminal.core.Controller"),
width=40,
view=view_mock,
count=30,
)
return button
@pytest.fixture
def user_button(mocker, width=38):
"""
Mocked User Button.
"""
return UserButton(
user={
"user_id": 5179,
"full_name": "Boo Boo",
"email": "[email protected]",
},
width=width,
controller=mocker.patch("zulipterminal.core.Controller"),
view=mocker.patch("zulipterminal.ui.View"),
state_marker="*",
)
@pytest.fixture
def msg_box(mocker, messages_successful_response):
"""
Mocked MessageBox with stream message
"""
return MessageBox(
messages_successful_response["messages"][0],
mocker.patch("zulipterminal.model.Model"),
None,
)
# --------------- Model Fixtures ----------------------------------------------
@pytest.fixture
def users_fixture(logged_on_user):
users = [logged_on_user]
for i in range(1, 3):
users.append(
{
"user_id": 10 + i,
"full_name": f"Human {i}",
"email": f"person{i}@example.com",
"avatar_url": None,
"is_active": True,
"bot_type": None,
"is_bot": False,
"is_admin": False,
}
)
return users
@pytest.fixture
def user_groups_fixture():
user_groups = []
members = [[1001, 11], [11, 12], [12], []]
for i in range(1, 5):
user_groups.append(
{
"id": 10 + i,
"name": f"Group {i}",
"description": f"Core developers of Group {i}",
"members": members[i - 1],
}
)
return user_groups
@pytest.fixture
def logged_on_user():
return {
"user_id": 1001,
"full_name": "Human Myself",
"email": "[email protected]",
"short_name": "Human",
}
general_stream = {
"name": "Some general stream",
"invite_only": False,
"color": "#b0a5fd", # Color in '#xxxxxx' format
"pin_to_top": False,
"stream_id": 1000,
"in_home_view": True,
"audible_notifications": False,
"description": "General Stream",
"is_old_stream": True,
"desktop_notifications": False,
"stream_weekly_traffic": 0,
"push_notifications": False,
"email_address": "[email protected]",
"subscribers": [1001, 11, 12],
}
# This is a private stream;
# only description/stream_id/invite_only/name/color vary from above
secret_stream = {
"description": "Some private stream",
"stream_id": 99,
"pin_to_top": False,
"invite_only": True,
"name": "Secret stream",
"email_address": "[email protected]",
"color": "#ccc", # Color in '#xxx' format
"in_home_view": True,
"audible_notifications": False,
"is_old_stream": True,
"desktop_notifications": False,
"stream_weekly_traffic": 0,
"push_notifications": False,
"subscribers": [1001, 11],
}
@pytest.fixture
def streams_fixture():
streams = [general_stream, secret_stream]
for i in range(1, 3):
streams.append(
{
"name": f"Stream {i}",
"invite_only": False,
"color": "#b0a5fd",
"pin_to_top": False,
"stream_id": i,
"in_home_view": True,
"audible_notifications": False,
"description": f"A description of stream {i}",
"is_old_stream": True,
"desktop_notifications": False,
"stream_weekly_traffic": 0,
"push_notifications": False,
"email_address": f"stream{i}@example.com",
"subscribers": [1001, 11, 12],
}
)
return deepcopy(streams)
@pytest.fixture
def realm_emojis():
# Omitting source_url, author_id (server version 3.0),
# author (server version < 3.0) since they are not used.
return {
"1": {
"deactivated": True,
"id": "1",
"name": "green_tick",
},
"202020": {
"deactivated": False,
"id": "202020",
"name": "joker",
},
"2": {
"deactivated": True,
"id": "2",
"name": "joy_cat",
},
"3": {
"deactivated": False,
"id": "3",
"name": "singing",
},
"4": {
"deactivated": False,
"id": "4",
"name": "zulip",
},
}
@pytest.fixture
def realm_emojis_data():
return OrderedDict(
[
("joker", {"code": "202020", "type": "realm_emoji"}),
("singing", {"code": "3", "type": "realm_emoji"}),
("zulip", {"code": "4", "type": "realm_emoji"}),
]
)
@pytest.fixture
def unicode_emojis():
return OrderedDict(
[
("happy", {"code": "1f600", "type": "unicode_emoji"}),
("joker", {"code": "1f0cf", "type": "unicode_emoji"}),
("joy_cat", {"code": "1f639", "type": "unicode_emoji"}),
("rock_on", {"code": "1f918", "type": "unicode_emoji"}),
("smile", {"code": "263a", "type": "unicode_emoji"}),
("smiley", {"code": "1f603", "type": "unicode_emoji"}),
("smirk", {"code": "1f60f", "type": "unicode_emoji"}),
]
)
@pytest.fixture
def zulip_emoji():
return OrderedDict([("zulip", {"code": "zulip", "type": "zulip_extra_emoji"})])
stream_msg_template = {
"id": 537286,
"sender_full_name": "Foo Foo",
"timestamp": 1520918722,
"client": "website",
"recipient_id": 6076,
"sender_email": "[email protected]",
"type": "stream",
"sender_realm_str": "",
"flags": ["read"],
"sender_id": 5140,
"content_type": "text/x-markdown",
"stream_id": 205,
"subject": "Test",
"reactions": [],
"subject_links": [],
"avatar_url": "dummy_avatar_url",
"is_me_message": False,
"sender_short_name": "foo",
"content": "Stream content here.",
"display_recipient": "PTEST",
}
pm_template = {
"id": 537287,
"sender_full_name": "Foo Foo",
"timestamp": 1520918736,
"client": "website",
"recipient_id": 5780,
"is_me_message": False,
"sender_email": "[email protected]",
"flags": ["read"],
"sender_id": 5140,
"content_type": "text/x-markdown",
"sender_realm_str": "",
"subject": "",
"reactions": [],
"type": "private",
"avatar_url": "dummy_avatar_url",
"subject_links": [],
"sender_short_name": "foo",
"content": "Hey PM content here.",
"display_recipient": [
{
"id": 5179,
"is_mirror_dummy": False,
"full_name": "Boo Boo",
"short_name": "boo",
"email": "[email protected]",
},
{
"short_name": "foo",
"id": 5140,
"is_mirror_dummy": False,
"full_name": "Foo Foo",
"email": "[email protected]",
},
],
}
group_pm_template = {
"id": 537288,
"sender_full_name": "Foo Foo",
"timestamp": 1520918737,
"client": "website",
"recipient_id": 5780, # FIXME Unsure
"is_me_message": False,
"sender_email": "[email protected]",
"flags": ["read"],
"sender_id": 5140,
"content_type": "text/x-markdown",
"sender_realm_str": "",
"subject": "",
"reactions": [],
"type": "private",
"avatar_url": "dummy_avatar_url",
"subject_links": [],
"sender_short_name": "foo",
"content": "Hey PM content here again.",
"display_recipient": [
{
"id": 5179,
"is_mirror_dummy": False,
"full_name": "Boo Boo",
"short_name": "boo",
"email": "[email protected]",
},
{
"short_name": "foo",
"id": 5140,
"is_mirror_dummy": False,
"full_name": "Foo Foo",
"email": "[email protected]",
},
{
"short_name": "bar",
"id": 5180,
"is_mirror_dummy": False,
"full_name": "Bar Bar",
"email": "[email protected]",
},
],
}
@pytest.fixture(
params=[stream_msg_template, pm_template, group_pm_template],
ids=["stream_message", "pm_message", "group_pm_message"],
)
def message_fixture(request):
"""
Acts as a parametrize fixture for stream msg, pms and group_pms.
"""
return deepcopy(request.param)
@pytest.fixture
def messages_successful_response() -> Dict[str, Any]:
"""
A successful response from a /messages API query.
"""
return deepcopy(
{
"anchor": 10000000000000000,
"messages": [
stream_msg_template,
pm_template,
group_pm_template,
],
"result": "success",
"msg": "",
}
)
@pytest.fixture(
params=SUPPORTED_SERVER_VERSIONS,
ids=(lambda param: "server_version:{}-server_feature_level:{}".format(*param)),
)
def zulip_version(request):
"""
Fixture to test different components based on the server version and the
feature level.
"""
return request.param
@pytest.fixture(
params=[
[
{
"content": "Hello!",
"timestamp": 1530129122,
"topic": "hello world",
"user_id": 1001,
# ...
}
],
[
{
"content": "Hello!",
"timestamp": 1530129122,
"topic": "party at my houz",
"user_id": 1001,
# ...
},
{
"content": "Howdy!",
"prev_content": "Hello!",
"prev_topic": "party at my houz",
"timestamp": 1530129134,
"topic": "party at my house",
"user_id": 1001,
# ...
},
],
],
ids=[
"unedited_message",
"edited_message",
],
)
def message_history(request):
"""
Returns message edit history for a message.
"""
return request.param
@pytest.fixture
def topics():
return ["Topic 1", "This is a topic", "Hello there!"]
@pytest.fixture
def initial_data(logged_on_user, users_fixture, streams_fixture, realm_emojis):
"""
Response from /register API request.
"""
return {
"full_name": logged_on_user["full_name"],
"email": logged_on_user["email"],
"user_id": logged_on_user["user_id"],
"realm_name": "Test Organization Name",
"unsubscribed": [
{
"audible_notifications": False,
"description": "announce",
"stream_id": 7,
"is_old_stream": True,
"desktop_notifications": False,
"pin_to_top": False,
"stream_weekly_traffic": 0,
"invite_only": False,
"name": "announce",
"push_notifications": False,
"email_address": "",
"color": "#bfd56f",
"in_home_view": True,
}
],
"result": "success",
"queue_id": "1522420755:786",
"realm_users": users_fixture,
"cross_realm_bots": [
{
"full_name": "Notification Bot",
"timezone": "",
"is_bot": True,
"date_joined": "2015-12-28T19:58:29.035543+00:00",
"email": "[email protected]",
"user_id": 5,
"is_admin": False,
"avatar_url": "dummy_avatar_url",
},
{
"full_name": "Email Gateway",
"timezone": "",
"is_bot": True,
"date_joined": "2015-12-28T19:58:29.037658+00:00",
"email": "[email protected]",
"user_id": 6,
"is_admin": False,
"avatar_url": "dummy_avatar_url",
},
{
"full_name": "Welcome Bot",
"timezone": "",
"is_bot": True,
"date_joined": "2015-12-28T19:58:29.033231+00:00",
"email": "[email protected]",
"user_id": 4,
"is_admin": False,
"avatar_url": "dummy_avatar_url",
},
{
"full_name": "Zulip Feedback Bot",
"timezone": "",
"is_bot": True,
"date_joined": "2015-12-28T19:58:28.972281+00:00",
"email": "[email protected]",
"user_id": 1,
"is_admin": False,
"avatar_url": "dummy_avatar_url",
},
],
"subscriptions": streams_fixture,
"msg": "",
"max_message_id": 552761,
"never_subscribed": [
{
"invite_only": False,
"description": "Announcements from the Zulip GCI Mentors",
"stream_id": 87,
"name": "GCI announce",
"is_old_stream": True,
"stream_weekly_traffic": 0,
},
{
"invite_only": False,
"description": "General discussion",
"stream_id": 74,
"name": "GCI general",
"is_old_stream": True,
"stream_weekly_traffic": 0,
},
],
"unread_msgs": {
"pms": [
{"sender_id": 1, "unread_message_ids": [1, 2]},
{"sender_id": 2, "unread_message_ids": [3]},
],
"count": 0,
"mentions": [],
"streams": [
{
"stream_id": 1000,
"topic": "Some general unread topic",
"unread_message_ids": [4, 5, 6],
"sender_ids": [1, 2],
},
{
"stream_id": 99,
"topic": "Some private unread topic",
"unread_message_ids": [7],
"sender_ids": [1, 2],
},
],
"huddles": [
{"user_ids_string": "1001,11,12", "unread_message_ids": [11, 12, 13]},
{
"user_ids_string": "1001,11,12,13",
"unread_message_ids": [101, 102],
},
],
},
"presences": {
"[email protected]": {
"ZulipElectron": {
"pushable": False,
"client": "ZulipElectron",
"status": "idle",
"timestamp": 1522484059,
},
"ZulipMobile": {
"pushable": False,
"client": "ZulipMobile",
"status": "idle",
"timestamp": 1522384165,
},
"aggregated": {
"timestamp": 1522484059,
"client": "ZulipElectron",
"status": "idle",
},
},
logged_on_user["email"]: {
"website": {
"pushable": True,
"client": "website",
"status": "active",
"timestamp": 1522458138,
},
"ZulipMobile": {
"pushable": True,
"client": "ZulipMobile",
"status": "active",
"timestamp": 1522480103,
},
"aggregated": {
"timestamp": 1522480103,
"client": "ZulipMobile",
"status": "active",
},
},
},
"twenty_four_hour_time": True,
"realm_emoji": realm_emojis,
"last_event_id": -1,
"muted_topics": [],
"realm_user_groups": [],
# Deliberately use hard-coded zulip version and feature level to avoid
# adding extra tests unnecessarily.
"zulip_version": MINIMUM_SUPPORTED_SERVER_VERSION[0],
"zulip_feature_level": MINIMUM_SUPPORTED_SERVER_VERSION[1],
"starred_messages": [1117554, 1117558, 1117574],
}
@pytest.fixture
def initial_index():
return deepcopy(helper_initial_index)
@pytest.fixture
def empty_index():
return deepcopy(
{
"pointer": defaultdict(set, {}),
"all_msg_ids": set(),
"starred_msg_ids": set(),
"mentioned_msg_ids": set(),
"private_msg_ids": set(),
"private_msg_ids_by_user_ids": defaultdict(set, {}),
"stream_msg_ids_by_stream_id": defaultdict(set, {}),
"topic_msg_ids": defaultdict(dict, {}),
"edited_messages": set(),
"topics": defaultdict(list),
"search": set(),
"messages": defaultdict(
dict,
{
stream_msg_template["id"]: stream_msg_template,
pm_template["id"]: pm_template,
group_pm_template["id"]: group_pm_template,
},
),
}
)
@pytest.fixture
def index_all_messages(empty_index):
"""
Expected index of `initial_data` fixture when model.narrow = []
"""
return dict(empty_index, **{"all_msg_ids": {537286, 537287, 537288}})
@pytest.fixture
def index_stream(empty_index):
"""
Expected index of initial_data when model.narrow = [['stream', '7']]
"""
diff = {
"stream_msg_ids_by_stream_id": defaultdict(set, {205: {537286}}),
"private_msg_ids": {537287, 537288},
}
return dict(empty_index, **diff)
@pytest.fixture
def index_topic(empty_index):
"""
Expected index of initial_data when model.narrow = [['stream', '7'],
['topic', 'Test']]
"""
diff = {"topic_msg_ids": defaultdict(dict, {205: {"Test": {537286}}})}
return dict(empty_index, **diff)
@pytest.fixture
def index_user(empty_index):
"""
Expected index of initial_data when model.narrow = [['pm_with',
'[email protected]'],
"""
user_ids = frozenset({5179, 5140})
diff = {
"private_msg_ids_by_user_ids": defaultdict(set, {user_ids: {537287}}),
"private_msg_ids": {537287, 537288},
}
return dict(empty_index, **diff)
@pytest.fixture
def index_user_multiple(empty_index):
"""
Expected index of initial_data when model.narrow = [['pm_with',
'[email protected], [email protected]'],
"""
user_ids = frozenset({5179, 5140, 5180})
diff = {
"private_msg_ids_by_user_ids": defaultdict(set, {user_ids: {537288}}),
"private_msg_ids": {537287, 537288},
}
return dict(empty_index, **diff)
@pytest.fixture(
params=[
{537286, 537287, 537288},
{537286},
{537287},
{537288},
{537286, 537287},
{537286, 537288},
{537287, 537288},
]
)
def index_all_starred(empty_index, request):
msgs_with_stars = request.param
index = dict(
empty_index, starred_msg_ids=msgs_with_stars, private_msg_ids={537287, 537288}
)
for msg_id, msg in index["messages"].items():
if msg_id in msgs_with_stars and "starred" not in msg["flags"]:
msg["flags"].append("starred")
return index
@pytest.fixture(
params=[
{537286, 537287, 537288},
{537286},
{537287},
{537288},
{537286, 537287},
{537286, 537288},
{537287, 537288},
]
)
def index_all_mentions(empty_index, request):
mentioned_messages = request.param
index = dict(
empty_index,
mentioned_msg_ids=mentioned_messages,
private_msg_ids={537287, 537288},
)
for msg_id, msg in index["messages"].items():
if msg_id in mentioned_messages and "mentioned" not in msg["flags"]:
msg["flags"].append("mentioned")
return index
@pytest.fixture
def user_profile(logged_on_user):
return { # FIXME These should all be self-consistent with others?
"max_message_id": 589270,
"short_name": logged_on_user["short_name"],
"full_name": logged_on_user["full_name"],
"email": logged_on_user["email"],
"is_bot": False,
"user_id": logged_on_user["user_id"],
"result": "success",
"client_id": "abcd",
"msg": "",
"is_admin": False,
"pointer": 589234,
}
@pytest.fixture
def error_response():
return {"msg": "Invalid API key", "result": "error"}
@pytest.fixture
def user_dict(logged_on_user):
"""
User_dict created according to `initial_data` fixture.
"""
return {
logged_on_user["email"]: {
"full_name": logged_on_user["full_name"],
"email": logged_on_user["email"],
"status": "active",
"user_id": logged_on_user["user_id"],
},
"[email protected]": {
"full_name": "Human 1",
"email": "[email protected]",
"user_id": 11,
"status": "inactive",
},
"[email protected]": {
"full_name": "Human 2",
"email": "[email protected]",
"user_id": 12,
"status": "inactive",
},
"[email protected]": {
"email": "[email protected]",
"full_name": "Email Gateway",
"status": "inactive",
"user_id": 6,
},
"[email protected]": {
"email": "[email protected]",
"full_name": "Zulip Feedback Bot",
"status": "inactive",
"user_id": 1,
},
"[email protected]": {
"email": "[email protected]",
"full_name": "Notification Bot",
"status": "inactive",
"user_id": 5,
},
"[email protected]": {
"email": "[email protected]",
"full_name": "Welcome Bot",
"status": "inactive",
"user_id": 4,
},
}
@pytest.fixture
def user_list(logged_on_user):
"""
List of users created corresponding to
`initial_data` fixture.
"""
# NOTE These are sorted active > idle, then according to full_name
return [
{
"full_name": logged_on_user["full_name"],
"email": logged_on_user["email"],
"status": "active",
"user_id": logged_on_user["user_id"],
},
{
"email": "[email protected]",
"full_name": "Email Gateway",
"status": "inactive",
"user_id": 6,
},
{
"full_name": "Human 1",
"email": "[email protected]",
"user_id": 11,
"status": "inactive",
},
{
"full_name": "Human 2",
"email": "[email protected]",
"user_id": 12,
"status": "inactive",
},
{
"email": "[email protected]",
"full_name": "Notification Bot",
"status": "inactive",
"user_id": 5,
},
{
"email": "[email protected]",
"full_name": "Welcome Bot",
"status": "inactive",
"user_id": 4,
},
{
"email": "[email protected]",
"full_name": "Zulip Feedback Bot",
"status": "inactive",
"user_id": 1,
},
]
@pytest.fixture
def streams():
"""
List of streams created corresponding to
`initial_data` fixture.
"""
return [
{
"name": "Secret stream",
"id": 99,
"color": "#ccc",
"invite_only": True,
"description": "Some private stream",
},
{
"name": "Some general stream",
"id": 1000,
"color": "#baf",
"invite_only": False,
"description": "General Stream",
},
{
"name": "Stream 1",
"id": 1,
"color": "#baf",
"invite_only": False,
"description": "A description of stream 1",
},
{
"name": "Stream 2",
"id": 2,
"color": "#baf",
"invite_only": False,
"description": "A description of stream 2",
},
]
@pytest.fixture
def user_id(logged_on_user):
"""
Default user id of the current
user, i.e., Human Myself,
according to the current fixtures.
"""
return logged_on_user["user_id"]
@pytest.fixture
def stream_dict(streams_fixture):
return {stream["stream_id"]: stream for stream in streams_fixture}
@pytest.fixture(
params=[
{
("Stream 1", "muted stream muted topic"): None,
("Stream 2", "muted topic"): None,
},
{
("Stream 1", "muted stream muted topic"): 1530129122,
("Stream 2", "muted topic"): 1530129122,
},
],
ids=[
"zulip_feature_level:None",
"zulip_feature_level:1",
],
)
def processed_muted_topics(request):
"""
Locally processed muted topics data (see _muted_topics in Model.__init__).
"""
return request.param
@pytest.fixture
def classified_unread_counts():
"""
Unread counts returned by the
helper.classify_unread_counts function.
"""
return {
"all_msg": 12,
"all_pms": 8,
"unread_topics": {
(1000, "Some general unread topic"): 3,
(99, "Some private unread topic"): 1,
},
"unread_pms": {
1: 2,
2: 1,
},
"unread_huddles": {
frozenset({1001, 11, 12}): 3,
frozenset({1001, 11, 12, 13}): 2,
},
"streams": {1000: 3, 99: 1},
}
# --------------- UI Fixtures -----------------------------------------
@pytest.fixture(
params=[
(key, expected_key)
for keys, expected_key in [
(keys_for_command("GO_UP"), "up"),
(keys_for_command("GO_DOWN"), "down"),
(keys_for_command("SCROLL_UP"), "page up"),
(keys_for_command("SCROLL_DOWN"), "page down"),
(keys_for_command("GO_TO_BOTTOM"), "end"),
]
for key in keys
],
ids=lambda param: "key:{}-expected_key:{}".format(*param),
)
def navigation_key_expected_key_pair(request):
"""
Fixture to generate pairs of navigation keys with their respective
expected key.
The expected key is the one which is passed to the super `keypress` calls.
"""
return request.param
@pytest.fixture
def widget_size():
"""
Returns widget size for any widget.
"""
def _widget_size(widget):
widget_type, *_ = widget.sizing()
if widget_type == "box":
return (200, 20)
elif widget_type == "flow":
return (20,)
else:
return None
return _widget_size
|
the-stack_106_29259 | from __future__ import division, absolute_import
import re
import numpy as np
from dataset_loader import DatasetLoader
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected, flatten
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.merge_ops import merge
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from constants import *
from os.path import isfile, join
import random
import sys
class EmotionRecognition:
def __init__(self):
self.dataset = DatasetLoader()
def build_network(self):
# Smaller 'AlexNet'
# https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
print('[+] Building CNN')
self.network = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1])
self.network = conv_2d(self.network, 64, 5, activation = 'relu')
#self.network = local_response_normalization(self.network)
self.network = max_pool_2d(self.network, 3, strides = 2)
self.network = conv_2d(self.network, 64, 5, activation = 'relu')
self.network = max_pool_2d(self.network, 3, strides = 2)
self.network = conv_2d(self.network, 128, 4, activation = 'relu')
self.network = dropout(self.network, 0.3)
self.network = fully_connected(self.network, 3072, activation = 'relu')
self.network = fully_connected(self.network, len(EMOTIONS), activation = 'softmax')
self.network = regression(self.network,
optimizer = 'momentum',
loss = 'categorical_crossentropy')
self.model = tflearn.DNN(
self.network,
checkpoint_path = SAVE_DIRECTORY + '/emotion_recognition',
max_checkpoints = 1,
tensorboard_verbose = 2
)
self.load_model()
def load_saved_dataset(self):
self.dataset.load_from_save()
print('[+] Dataset found and loaded')
def start_training(self):
self.load_saved_dataset()
self.build_network()
if self.dataset is None:
self.load_saved_dataset()
# Training
print('[+] Training network')
self.model.fit(
self.dataset.images, self.dataset.labels,
validation_set = (self.dataset.images_test, self.dataset._labels_test),
n_epoch = 100,
batch_size = 50,
shuffle = True,
show_metric = True,
snapshot_step = 200,
snapshot_epoch = True,
run_id = 'emotion_recognition'
)
def predict(self, image):
if image is None:
return None
image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
return self.model.predict(image)
def save_model(self):
self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)
def load_model(self):
if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
def show_usage():
# I didn't want to add more dependencies
print('[!] Usage: python emotion_recognition.py')
print('\t emotion_recognition.py train \t Trains and saves model with saved dataset')
print('\t emotion_recognition.py poc \t Launch the proof of concept')
if __name__ == "__main__":
if len(sys.argv) <= 1:
show_usage()
exit()
network = EmotionRecognition()
if sys.argv[1] == 'train':
network.start_training()
network.save_model()
elif sys.argv[1] == 'poc':
import poc
else:
show_usage()
|
the-stack_106_29261 | from os import path
def run():
with open(path.join(path.dirname(__file__), '../inputs/03.txt')) as file:
n = int(file.read())
print('part 1:', spiral_distance(n))
print('part 2:', first_greater_than(n))
def first_greater_than(n):
x = 0
y = 0
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
neighbors = directions + [(1, 1), (1, -1), (-1, 1), (-1, -1)]
dir_count = len(directions)
dir_idx = -1
cells = {(0, 0): 1}
while True:
new_dir_idx = (dir_idx + 1) % dir_count
new_dir = directions[new_dir_idx]
new_pos = (x + new_dir[0], y + new_dir[1])
if new_pos in cells:
same_dir = directions[dir_idx]
new_pos = (x + same_dir[0], y + same_dir[1])
else:
dir_idx = new_dir_idx
x = new_pos[0]
y = new_pos[1]
value = 0
for neighbor in neighbors:
neighbor_pos = (x + neighbor[0], y + neighbor[1])
value += cells.get(neighbor_pos, 0)
cells[new_pos] = value
if value > n:
return value
def spiral_distance(n):
x = 0
y = 0
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
dir_count = len(directions)
dir_idx = -1
width = 1
turn_in = 0
for i in range(1, n):
if turn_in == 0:
turn_in = width - 1
dir_idx = (dir_idx + 1) % dir_count
else:
turn_in -= 1
if i == width * width + width:
width += 1
x += directions[dir_idx][0]
y += directions[dir_idx][1]
return abs(x) + abs(y)
if __name__ == '__main__':
run()
|
the-stack_106_29262 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from .layer_function_generator import templatedoc
import numpy
__all__ = [
'create_tensor',
'create_parameter',
'create_global_var',
'cast',
'concat',
'sums',
'assign',
'fill_constant_batch_size_like',
'fill_constant',
'argmin',
'argmax',
'argsort',
'ones',
'zeros',
'reverse',
]
def create_tensor(dtype, name=None, persistable=False):
"""
Create a variable, which will hold a LoDTensor with data type dtype.
Args:
dtype(string): 'float32'|'int32'|..., the data type of the
created tensor.
name(string): The name of the created tensor, if not set,
the name will be a random unique one.
persistable(bool): Set the persistable flag of the create tensor.
Returns:
Variable: The tensor variable storing the created tensor.
Examples:
.. code-block:: python
tensor = fluid.layers.create_tensor(dtype='float32')
"""
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(
name=helper.name, dtype=dtype, persistable=persistable)
def create_parameter(shape,
dtype,
name=None,
attr=None,
is_bias=False,
default_initializer=None):
"""
Create a parameter. The parameter is a learnable variable, which can have
gradient, and can be optimized.
NOTE: this is a very low-level API. This API is useful when you create
an operator by yourself instead of using layers.
Args:
shape(list[int]): shape of the parameter
dtype(string): element type of the parameter
attr(ParamAttr): attributes of the parameter
is_bias(bool): This can affect which default initializer is chosen
when default_initializer is None. If is_bias,
initializer.Constant(0.0) will be used. Otherwise,
Xavier() will be used.
default_initializer(Initializer): initializer for the parameter
Returns:
the created parameter.
Examples:
>>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
>>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
>>> hidden = fluid.layers.matmul(x=data, y=W)
"""
helper = LayerHelper("create_parameter", **locals())
if attr is None:
attr = ParamAttr(name=name)
return helper.create_parameter(attr, shape, dtype, is_bias,
default_initializer)
def create_global_var(shape,
value,
dtype,
persistable=False,
force_cpu=False,
name=None):
"""
Create a new variable in the global block(block 0).
Args:
shape(list[int]): shape of the variable
value(float): the value of the variable. The new created
variable will be filled with it.
dtype(string): data type of the variable
persistable(bool): if this variable is persistable.
Default: False
force_cpu(bool): force this variable to be on CPU.
Default: False
name(str|None): The name of the variable. If set to None the variable
name will be generated automatically.
Default: None
Returns:
Variable: the created Variable
Examples:
.. code-block:: python
var = fluid.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype, shape=shape, persistable=persistable, name=name)
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
return var
def cast(x, dtype):
"""
This layer takes in the Variable :attr:`x` with :attr:`x.dtype` and casts
it to the output with :attr:`dtype`.
Args:
x (Variable): The input Variable for casting.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable.
Returns:
Variable: The output Variable after casting.
Examples:
.. code-block:: python
data = fluid.layers.data(name='x', shape=[13], dtype='float32')
result = fluid.layers.cast(x=data, dtype='float64')
"""
helper = LayerHelper('cast', **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype,
'out_dtype': out.dtype})
return out
def concat(input, axis=0, name=None):
"""
**Concat**
This function concatenates the input along the axis mentioned
and returns that as the output.
Args:
input(list): List of tensors to be concatenated
axis(int): Integer axis along which the tensors will be concatenated
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: Output variable of the concatenation
Examples:
.. code-block:: python
out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
"""
helper = LayerHelper('concat', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
type='concat',
inputs={'X': input},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def sums(input, out=None):
"""
This function performs the sum operation on the input and returns the
result as the output.
Args:
input (Variable|list): The input tensor that has the elements
that need to be summed up.
out (Variable|None): Output parameter. The sum result.
Default: None
Returns:
Variable: the sum of input. The same as the argument 'out'
Examples:
.. code-block:: python
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
a0 = layers.array_read(array=tmp, i=i)
i = layers.increment(x=i)
a1 = layers.array_read(array=tmp, i=i)
mean_a0 = layers.mean(a0)
mean_a1 = layers.mean(a1)
a_sum = layers.sums(input=[mean_a0, mean_a1])
"""
helper = LayerHelper('sum', **locals())
if out is None:
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
type='sum',
inputs={'X': input},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def assign(input, output=None):
"""
**Assign**
This function copies the *input* Variable to the *output* Variable.
Args:
input(Variable|numpy.ndarray): The source variable
output(Variable|None): The destination variable
Returns:
Variable: The destination variable that was supplied as the *output*.
Examples:
.. code-block:: python
out = fluid.layers.create_tensor(dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
fluid.layers.assign(hidden, out)
"""
helper = LayerHelper('assign', **locals())
if output is None:
output = helper.create_tmp_variable(dtype=input.dtype)
if isinstance(input, Variable):
helper.append_op(
type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
elif isinstance(input, numpy.ndarray):
dtype = convert_np_dtype_to_dtype_(input.dtype)
if dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in input.flat]
elif dtype == VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in input.flat]
else:
raise ValueError("Unsupported dtype %s", input.dtype)
if input.size > 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
helper.append_op(
type='assign_value',
outputs={'Out': [output]},
attrs={
'dtype': dtype,
'shape': list(input.shape),
value_name: values
})
else:
raise ValueError("Wrong type for assign input: %s" % type(input))
return output
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
**fill_constant**
This function creates a tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
The attribute `stop_gradient` of the created tensor is set to True.
Args:
shape(tuple|list|None): Shape of the output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor.
value(float): The constant value used to initialize the output tensor.
out(Variable): The output tensor.
force_cpu(True|False): data should be on CPU if set true.
Returns:
Variable: The tensor variable storing the output.
Examples:
.. code-block:: python
data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
"""
helper = LayerHelper("fill_constant", **locals())
if out is None:
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu or force_init_on_cpu()
})
out.stop_gradient = True
return out
@templatedoc()
def fill_constant_batch_size_like(input,
shape,
dtype,
value,
input_dim_idx=0,
output_dim_idx=0):
"""
${comment}
It also sets *stop_gradient* to True.
>>> data = fluid.layers.fill_constant_batch_size_like(
>>> input=like, shape=[1], value=0, dtype='int64')
Args:
input(${input_type}): ${input_comment}.
shape(${shape_type}): ${shape_comment}.
dtype(${dtype_type}): ${dtype_comment}.
value(${value_type}): ${value_comment}.
input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.
output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.
Returns:
${out_comment}.
"""
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx
})
out.stop_gradient = True
return out
def argmin(x, axis=0):
"""
**argmin**
This function computes the indices of the min elements
of the input tensor's element along the provided axis.
Args:
x(Variable): The input to compute the indices of
the min elements.
axis(int): Axis to compute indices along.
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
out = fluid.layers.argmin(x=in, axis=0)
out = fluid.layers.argmin(x=in, axis=-1)
"""
helper = LayerHelper("arg_min", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64)
helper.append_op(
type='arg_min',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def argmax(x, axis=0):
"""
**argmax**
This function computes the indices of the max elements
of the input tensor's element along the provided axis.
Args:
x(Variable): The input to compute the indices of
the max elements.
axis(int): Axis to compute indices along.
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
out = fluid.layers.argmax(x=in, axis=0)
out = fluid.layers.argmax(x=in, axis=-1)
"""
helper = LayerHelper("arg_max", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64)
helper.append_op(
type='arg_max',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def argsort(input, axis=-1, name=None):
"""
Performs sorting on the input Variable along the given axis, and outputs
the sorted data Variable and its corresponding index Variable with the same
shape as :attr:`input`.
.. code-block:: text
For example, the given axis is -1 and the input Variable
input = [[0.15849551, 0.45865775, 0.8563702 ],
[0.12070083, 0.28766365, 0.18776911]],
after argsort, the sorted Variable becomes
out = [[0.15849551, 0.45865775, 0.8563702 ],
[0.12070083, 0.18776911, 0.28766365]],
and the sorted indices along the given axis turn out to be
indices = [[0, 1, 2],
[0, 2, 1]]
Args:
input(Variable): The input Variable for sorting.
axis(int): The axis along which to sort the input Variable. When
:attr:`axis` < 0, the actual axis will be :attr:`axis` +
rank(:attr:`input`). Default -1, the last dimension.
name(str|None): (optional) A name for this layer. If set None, the
layer will be named automatically.
Returns:
tuple: A tuple of sorted data Variable and the sorted indices.
Examples:
.. code-block:: python
input = fluid.layers.data(name="input", shape=[2, 3])
out, indices = fluid.layers.argsort(input, axis=0)
"""
helper = LayerHelper("argsort", **locals())
out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
helper.append_op(
type='argsort',
inputs={'X': input},
outputs={'Out': out,
'Indices': ids},
attrs={'axis': axis})
return out, ids
def ones(shape, dtype, force_cpu=False):
"""
**ones**
This function creates a tensor of specified *shape* and
*dtype*, and initializes this with 1.
It also sets *stop_gradient* to True.
Args:
shape(tuple|list|None): Shape of output tensor
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
data = fluid.layers.ones(shape=[1], dtype='int64')
"""
return fill_constant(value=1.0, **locals())
def zeros(shape, dtype, force_cpu=False):
"""
**zeros**
This function creates a tensor of specified *shape* and
*dtype*, and initializes this with 0.
It also sets *stop_gradient* to True.
Args:
shape(tuple|list|None): Shape of output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
force_cpu(bool, default False): Whether to make output stay on CPU.
Returns:
Variable: The tensor variable storing the output.
Examples:
.. code-block:: python
data = fluid.layers.zeros(shape=[1], dtype='int64')
"""
return fill_constant(value=0.0, **locals())
def reverse(x, axis):
"""
**reverse**
This function reverses the input 'x' along the given axes.
Args:
x(Variable): the input to be reversed.
axis(int|tuple|list): Axis along which the order of elements
is reversed. If it is a tuple or a list, reversing
will be applied on each axis in the tuple or list.
Returns:
Variable: The reversed tensor.
Examples:
.. code-block:: python
out = fluid.layers.reverse(x=in, axis=0)
# or:
out = fluid.layers.reverse(x=in, axis=[0,1])
"""
if isinstance(axis, int):
axis = [axis]
helper = LayerHelper("reverse", **locals())
out = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type='reverse',
inputs={'Input': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def save(x, file_path, overwrite=True):
"""
Saves a variable as a file.
Args:
x(variable): The Tensor/LoDTensor to be saved.
file_path(str): The file path where the variable will be saved.
overwrite(bool): Whether or not to overwrite the given file if it already
exists. If it's set to 'False' and the file exists, a runtime
error will be thrown.
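Examples:
.. code-block:: python
# A minimal illustrative sketch; `var` is a placeholder for an existing
# Tensor/LoDTensor variable created elsewhere in the program.
var = fluid.layers.create_tensor(dtype='float32')
fluid.layers.save(var, file_path="./saved_var", overwrite=True)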
"""
helper = LayerHelper("save", **locals())
helper.append_op(
type="save",
inputs={"input": x},
outputs={},
args={"file_path": file_path,
"overwrite": overwrite})
def save_combine(x, file_path, overwrite=True):
"""
Saves a list of variables into a single file.
Args:
x(list): A list of Tensor/LoDTensor variables to be saved together in
a single file.
file_path(str): The file path where variables will be saved.
overwrite(bool): Whether or not to overwrite the given file if it already
exists. If it's set to 'False' and the file exists, a runtime
error will be thrown.
Returns:
There is no return value.
Examples:
.. code-block:: python
v1 = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
v2 = fluid.layers.data(name="data",
shape=(6, 8, 4),
dtype="float32")
fluid.layers.save_combine([v1, v2], file_path="output")
"""
helper = LayerHelper("save_combine", **locals())
helper.append_op(
type="save_combine",
inputs={"input": x},
outputs={},
args={"file_path": file_path,
"overwrite": overwrite})
def load_combine(out, file_path):
"""
Loads a list of variables from a single file.
Args:
out(list): The list of variables to be read from the disk file.
file_path(str): The path of the disk file.
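Examples:
.. code-block:: python
# A minimal illustrative sketch; `v1` and `v2` are placeholders for variables
# that were previously written to the "output" file with save_combine.
fluid.layers.load_combine([v1, v2], file_path="output")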
"""
helper = LayerHelper("load_combine", **locals())
helper.append_op(
type="load_combine",
inputs={},
output={"Out": out},
args={"file_path": file_path})
|
the-stack_106_29264 | #!/usr/bin/env python3
# Safe Update: A simple service that waits for network access and tries to
# update every 10 minutes. It's intended to make the OP update process more
# robust against Git repository corruption. This service DOES NOT try to fix
# an already-corrupt BASEDIR Git repo, only prevent it from happening.
#
# During normal operation, both onroad and offroad, the update process makes
# no changes to the BASEDIR install of OP. All update attempts are performed
# in a disposable staging area provided by OverlayFS. It assumes the deleter
# process provides enough disk space to carry out the process.
#
# If an update succeeds, a flag is set, and the update is swapped in at the
# next reboot. If an update is interrupted or otherwise fails, the OverlayFS
# upper layer and metadata can be discarded before trying again.
#
# The swap on boot is triggered by launch_chffrplus.sh
# gated on the existence of $FINALIZED/.overlay_consistent and also the
# existence and mtime of $BASEDIR/.overlay_init.
#
# Other than build byproducts, BASEDIR should not be modified while this
# service is running. Developers modifying code directly in BASEDIR should
# disable this service.
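# A minimal sketch (illustrative only, not part of this service) of how another
# process could check whether a finalized, consistent update is staged, assuming
# the default STAGING_ROOT below:
#
#   import os
#   update_ready = os.path.isfile("/data/safe_staging/finalized/.overlay_consistent")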
import os
import datetime
import subprocess
import psutil
import shutil
import signal
import fcntl
import time
import threading
from pathlib import Path
from typing import List, Tuple, Optional
from common.basedir import BASEDIR
from common.markdown import parse_markdown
from common.params import Params
from selfdrive.hardware import EON, TICI, HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.version import is_tested_branch
LOCK_FILE = os.getenv("UPDATER_LOCK_FILE", "/tmp/safe_staging_overlay.lock")
STAGING_ROOT = os.getenv("UPDATER_STAGING_ROOT", "/data/safe_staging")
NEOSUPDATE_DIR = os.getenv("UPDATER_NEOSUPDATE_DIR", "/data/neoupdate")
OVERLAY_UPPER = os.path.join(STAGING_ROOT, "upper")
OVERLAY_METADATA = os.path.join(STAGING_ROOT, "metadata")
OVERLAY_MERGED = os.path.join(STAGING_ROOT, "merged")
FINALIZED = os.path.join(STAGING_ROOT, "finalized")
DAYS_NO_CONNECTIVITY_MAX = 14 # do not allow to engage after this many days
DAYS_NO_CONNECTIVITY_PROMPT = 10 # send an offroad prompt after this many days
class WaitTimeHelper:
def __init__(self, proc):
self.proc = proc
self.ready_event = threading.Event()
self.shutdown = False
signal.signal(signal.SIGTERM, self.graceful_shutdown)
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGHUP, self.update_now)
def graceful_shutdown(self, signum: int, frame) -> None:
# umount -f doesn't appear effective in avoiding "device busy" on NEOS,
# so don't actually die until the next convenient opportunity in main().
cloudlog.info("caught SIGINT/SIGTERM, dismounting overlay at next opportunity")
# forward the signal to all our child processes
child_procs = self.proc.children(recursive=True)
for p in child_procs:
p.send_signal(signum)
self.shutdown = True
self.ready_event.set()
def update_now(self, signum: int, frame) -> None:
cloudlog.info("caught SIGHUP, running update check immediately")
self.ready_event.set()
def sleep(self, t: float) -> None:
self.ready_event.wait(timeout=t)
def run(cmd: List[str], cwd: Optional[str] = None, low_priority: bool = False):
if low_priority:
cmd = ["nice", "-n", "19"] + cmd
return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8')
def set_consistent_flag(consistent: bool) -> None:
os.sync()
consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
if consistent:
consistent_file.touch()
elif not consistent:
consistent_file.unlink(missing_ok=True)
os.sync()
def set_params(new_version: bool, failed_count: int, exception: Optional[str]) -> None:
params = Params()
params.put("UpdateFailedCount", str(failed_count))
last_update = datetime.datetime.utcnow()
if failed_count == 0:
t = last_update.isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
else:
try:
t = params.get("LastUpdateTime", encoding='utf8')
last_update = datetime.datetime.fromisoformat(t)
except (TypeError, ValueError):
pass
if exception is None:
params.delete("LastUpdateException")
else:
params.put("LastUpdateException", exception)
# Write out release notes for new versions
if new_version:
try:
with open(os.path.join(FINALIZED, "RELEASES.md"), "rb") as f:
r = f.read().split(b'\n\n', 1)[0] # Slice latest release notes
try:
params.put("ReleaseNotes", parse_markdown(r.decode("utf-8")))
except Exception:
params.put("ReleaseNotes", r + b"\n")
except Exception:
params.put("ReleaseNotes", "")
params.put_bool("UpdateAvailable", True)
# Handle user prompt
for alert in ("Offroad_UpdateFailed", "Offroad_ConnectivityNeeded", "Offroad_ConnectivityNeededPrompt"):
set_offroad_alert(alert, False)
now = datetime.datetime.utcnow()
dt = now - last_update
if failed_count > 15 and exception is not None:
if is_tested_branch():
extra_text = "Ensure the software is correctly installed"
else:
extra_text = exception
set_offroad_alert("Offroad_UpdateFailed", True, extra_text=extra_text)
elif dt.days > DAYS_NO_CONNECTIVITY_MAX and failed_count > 1:
set_offroad_alert("Offroad_ConnectivityNeeded", True)
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining = max(DAYS_NO_CONNECTIVITY_MAX - dt.days, 1)
set_offroad_alert("Offroad_ConnectivityNeededPrompt", True, extra_text=f"{remaining} day{'' if remaining == 1 else 's'}.")
def setup_git_options(cwd: str) -> None:
# We sync FS object atimes (which NEOS doesn't use) and mtimes, but ctimes
# are outside user control. Make sure Git is set up to ignore system ctimes,
# because they change when we make hard links during finalize. Otherwise,
# there is a lot of unnecessary churn. This appears to be a common need on
# OSX as well: https://www.git-tower.com/blog/make-git-rebase-safe-on-osx/
# We are using copytree to copy the directory, which also changes
# inode numbers. Ignore those changes too.
# Set protocol to the new version (default after git 2.26) to reduce data
# usage on git fetch --dry-run from about 400KB to 18KB.
git_cfg = [
("core.trustctime", "false"),
("core.checkStat", "minimal"),
("protocol.version", "2"),
("gc.auto", "0"),
("gc.autoDetach", "false"),
]
for option, value in git_cfg:
run(["git", "config", option, value], cwd)
def dismount_overlay() -> None:
if os.path.ismount(OVERLAY_MERGED):
cloudlog.info("unmounting existing overlay")
args = ["umount", "-l", OVERLAY_MERGED]
if TICI:
args = ["sudo"] + args
run(args)
def init_overlay() -> None:
overlay_init_file = Path(os.path.join(BASEDIR, ".overlay_init"))
# Re-create the overlay if BASEDIR/.git has changed since we created the overlay
if overlay_init_file.is_file():
git_dir_path = os.path.join(BASEDIR, ".git")
new_files = run(["find", git_dir_path, "-newer", str(overlay_init_file)])
if not len(new_files.splitlines()):
# A valid overlay already exists
return
else:
cloudlog.info(".git directory changed, recreating overlay")
cloudlog.info("preparing new safe staging area")
params = Params()
params.put_bool("UpdateAvailable", False)
set_consistent_flag(False)
dismount_overlay()
if TICI:
run(["sudo", "rm", "-rf", STAGING_ROOT])
if os.path.isdir(STAGING_ROOT):
shutil.rmtree(STAGING_ROOT)
for dirname in [STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED]:
os.mkdir(dirname, 0o755)
if os.lstat(BASEDIR).st_dev != os.lstat(OVERLAY_MERGED).st_dev:
raise RuntimeError("base and overlay merge directories are on different filesystems; not valid for overlay FS!")
# Leave a timestamped canary in BASEDIR to check at startup. The device clock
# should be correct by the time we get here. If the init file disappears, or
# critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can
  # assume that BASEDIR has been used for local development or otherwise modified,
# and skips the update activation attempt.
consistent_file = Path(os.path.join(BASEDIR, ".overlay_consistent"))
if consistent_file.is_file():
consistent_file.unlink()
overlay_init_file.touch()
os.sync()
overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}"
mount_cmd = ["mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED]
if TICI:
run(["sudo"] + mount_cmd)
run(["sudo", "chmod", "755", os.path.join(OVERLAY_METADATA, "work")])
else:
run(mount_cmd)
git_diff = run(["git", "diff"], OVERLAY_MERGED, low_priority=True)
params.put("GitDiff", git_diff)
cloudlog.info(f"git diff output:\n{git_diff}")
def finalize_update(wait_helper: WaitTimeHelper) -> None:
"""Take the current OverlayFS merged view and finalize a copy outside of
OverlayFS, ready to be swapped-in at BASEDIR. Copy using shutil.copytree"""
# Remove the update ready flag and any old updates
cloudlog.info("creating finalized version of the overlay")
set_consistent_flag(False)
# Copy the merged overlay view and set the update ready flag
if os.path.exists(FINALIZED):
shutil.rmtree(FINALIZED)
shutil.copytree(OVERLAY_MERGED, FINALIZED, symlinks=True)
run(["git", "reset", "--hard"], FINALIZED)
run(["git", "submodule", "foreach", "--recursive", "git", "reset"], FINALIZED)
cloudlog.info("Starting git gc")
t = time.monotonic()
try:
run(["git", "gc"], FINALIZED)
cloudlog.event("Done git gc", duration=time.monotonic() - t)
except subprocess.CalledProcessError:
cloudlog.exception(f"Failed git gc, took {time.monotonic() - t:.3f} s")
if wait_helper.shutdown:
cloudlog.info("got interrupted finalizing overlay")
else:
set_consistent_flag(True)
cloudlog.info("done finalizing overlay")
def handle_agnos_update(wait_helper: WaitTimeHelper) -> None:
from selfdrive.hardware.tici.agnos import flash_agnos_update, get_target_slot_number
cur_version = HARDWARE.get_os_version()
updated_version = run(["bash", "-c", r"unset AGNOS_VERSION && source launch_env.sh && \
echo -n $AGNOS_VERSION"], OVERLAY_MERGED).strip()
cloudlog.info(f"AGNOS version check: {cur_version} vs {updated_version}")
if cur_version == updated_version:
return
# prevent an openpilot getting swapped in with a mismatched or partially downloaded agnos
set_consistent_flag(False)
cloudlog.info(f"Beginning background installation for AGNOS {updated_version}")
set_offroad_alert("Offroad_NeosUpdate", True)
manifest_path = os.path.join(OVERLAY_MERGED, "selfdrive/hardware/tici/agnos.json")
target_slot_number = get_target_slot_number()
flash_agnos_update(manifest_path, target_slot_number, cloudlog)
set_offroad_alert("Offroad_NeosUpdate", False)
def handle_neos_update(wait_helper: WaitTimeHelper) -> None:
from selfdrive.hardware.eon.neos import download_neos_update
cur_neos = HARDWARE.get_os_version()
updated_neos = run(["bash", "-c", r"unset REQUIRED_NEOS_VERSION && source launch_env.sh && \
echo -n $REQUIRED_NEOS_VERSION"], OVERLAY_MERGED).strip()
cloudlog.info(f"NEOS version check: {cur_neos} vs {updated_neos}")
if cur_neos == updated_neos:
return
cloudlog.info(f"Beginning background download for NEOS {updated_neos}")
set_offroad_alert("Offroad_NeosUpdate", True)
update_manifest = os.path.join(OVERLAY_MERGED, "selfdrive/hardware/eon/neos.json")
neos_downloaded = False
start_time = time.monotonic()
# Try to download for one day
while not neos_downloaded and not wait_helper.shutdown and \
(time.monotonic() - start_time < 60*60*24):
wait_helper.ready_event.clear()
try:
download_neos_update(update_manifest, cloudlog)
neos_downloaded = True
except Exception:
cloudlog.info("NEOS background download failed, retrying")
wait_helper.sleep(120)
# If the download failed, we'll show the alert again when we retry
set_offroad_alert("Offroad_NeosUpdate", False)
if not neos_downloaded:
raise Exception("Failed to download NEOS update")
cloudlog.info(f"NEOS background download successful, took {time.monotonic() - start_time} seconds")
def check_git_fetch_result(fetch_txt: str) -> bool:
err_msg = "Failed to add the host to the list of known hosts (/data/data/com.termux/files/home/.ssh/known_hosts).\n"
return len(fetch_txt) > 0 and (fetch_txt != err_msg)
def check_for_update() -> Tuple[bool, bool]:
setup_git_options(OVERLAY_MERGED)
try:
git_fetch_output = run(["git", "fetch", "--dry-run"], OVERLAY_MERGED, low_priority=True)
return True, check_git_fetch_result(git_fetch_output)
except subprocess.CalledProcessError:
return False, False
def fetch_update(wait_helper: WaitTimeHelper) -> bool:
cloudlog.info("attempting git fetch inside staging overlay")
setup_git_options(OVERLAY_MERGED)
git_fetch_output = run(["git", "fetch"], OVERLAY_MERGED, low_priority=True)
cloudlog.info("git fetch success: %s", git_fetch_output)
cur_hash = run(["git", "rev-parse", "HEAD"], OVERLAY_MERGED).rstrip()
upstream_hash = run(["git", "rev-parse", "@{u}"], OVERLAY_MERGED).rstrip()
new_version: bool = cur_hash != upstream_hash
git_fetch_result = check_git_fetch_result(git_fetch_output)
cloudlog.info(f"comparing {cur_hash} to {upstream_hash}")
if new_version or git_fetch_result:
cloudlog.info("Running update")
if new_version:
cloudlog.info("git reset in progress")
r = [
run(["git", "reset", "--hard", "@{u}"], OVERLAY_MERGED, low_priority=True),
run(["git", "clean", "-xdf"], OVERLAY_MERGED, low_priority=True ),
run(["git", "submodule", "init"], OVERLAY_MERGED, low_priority=True),
run(["git", "submodule", "update"], OVERLAY_MERGED, low_priority=True),
]
cloudlog.info("git reset success: %s", '\n'.join(r))
if EON:
handle_neos_update(wait_helper)
elif TICI:
handle_agnos_update(wait_helper)
# Create the finalized, ready-to-swap update
finalize_update(wait_helper)
cloudlog.info("openpilot update successful!")
else:
cloudlog.info("nothing new from git at this time")
return new_version
def main() -> None:
params = Params()
if params.get_bool("DisableUpdates"):
cloudlog.warning("updates are disabled by the DisableUpdates param")
exit(0)
ov_lock_fd = open(LOCK_FILE, 'w')
try:
fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
raise RuntimeError("couldn't get overlay lock; is another instance running?") from e
# Set low io priority
proc = psutil.Process()
if psutil.LINUX:
proc.ionice(psutil.IOPRIO_CLASS_BE, value=7)
# Check if we just performed an update
if Path(os.path.join(STAGING_ROOT, "old_openpilot")).is_dir():
cloudlog.event("update installed")
if not params.get("InstallDate"):
t = datetime.datetime.utcnow().isoformat()
params.put("InstallDate", t.encode('utf8'))
overlay_init = Path(os.path.join(BASEDIR, ".overlay_init"))
overlay_init.unlink(missing_ok=True)
first_run = True
last_fetch_time = 0.0
update_failed_count = 0 # TODO: Load from param?
# Wait for IsOffroad to be set before our first update attempt
wait_helper = WaitTimeHelper(proc)
wait_helper.sleep(30)
# Run the update loop
# * every 5m, do a lightweight internet/update check
# * every 10m, do a full git fetch
while not wait_helper.shutdown:
update_now = wait_helper.ready_event.is_set()
wait_helper.ready_event.clear()
# Attempt an update
exception = None
new_version = False
update_failed_count += 1
try:
init_overlay()
internet_ok, update_available = check_for_update()
if internet_ok and not update_available:
update_failed_count = 0
# Fetch updates at most every 10 minutes
if internet_ok and (update_now or time.monotonic() - last_fetch_time > 60*10):
new_version = fetch_update(wait_helper)
update_failed_count = 0
last_fetch_time = time.monotonic()
if first_run and not new_version and os.path.isdir(NEOSUPDATE_DIR):
shutil.rmtree(NEOSUPDATE_DIR)
first_run = False
except subprocess.CalledProcessError as e:
cloudlog.event(
"update process failed",
cmd=e.cmd,
output=e.output,
returncode=e.returncode
)
exception = f"command failed: {e.cmd}\n{e.output}"
overlay_init.unlink(missing_ok=True)
except Exception as e:
cloudlog.exception("uncaught updated exception, shouldn't happen")
exception = str(e)
overlay_init.unlink(missing_ok=True)
if not wait_helper.shutdown:
try:
set_params(new_version, update_failed_count, exception)
except Exception:
cloudlog.exception("uncaught updated exception while setting params, shouldn't happen")
# TODO: replace this with a good backoff
wait_helper.sleep(300)
dismount_overlay()
if __name__ == "__main__":
main()
|
the-stack_106_29267 | import numpy as np
import json
import os
import cv2
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
import argparse
def computeIoU(head_bb, person_bb, epsilon=0.1, threshold=0.7):
"""
compute the ratio of intersection and union of the given head and the given person
the area of the person is weighted by epsilon
intersection over union = area of overlap / (area of head-box + epsilon * area of body-box)
:param person_bb: person bounding box
:param head_bb: head bounding box
:param epsilon: weight for person area
:return: "intersection over union"-like stuff
"""
headbox_area = (head_bb[2]-head_bb[0])*(head_bb[3]-head_bb[1])
person_area = (person_bb[2]-person_bb[0])*(person_bb[3]-person_bb[1])
dx = min(head_bb[2], person_bb[2])-max(head_bb[0], person_bb[0])
dy = min(head_bb[3], person_bb[3])-max(head_bb[1], person_bb[1])
result = 0
overlap_area = 0
if dx > 0 and dy > 0: # make sure person and head intersects
overlap_area = dx * dy
if computeIoH(overlap_area, headbox_area) > threshold: # TODO max problem instead of min
result = -overlap_area / (headbox_area + epsilon * person_area)
# if np.abs(result) > threshold:
# return result
# else:
# return 0
return result
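# Worked example for computeIoU (illustrative numbers only): a 10x10 head box
# fully contained in a 50x100 person box gives overlap 100, head area 100 and
# person area 5000, so with epsilon = 0.1 the returned cost is
# -100 / (100 + 0.1 * 5000) = -1/6; more negative values mean a better
# head/body match for the Hungarian assignment further below.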
def computeIoH(overlap, head):
"""
compute the ratio of intersection (of head and person) and head area
intersection over head-box = area of overlap / area of head-box
:param overlap: area of intersection
:param head: area of head
:return: IoH
"""
return overlap/head
# in progress...
def center(person_bb, head_bb, distance='euclidean'):
# compute distance from the two centers
width_head = head_bb[2]-head_bb[0]
height_head = head_bb[3]-head_bb[1]
center_head = np.array([head_bb[0]+width_head/2, head_bb[1]+height_head/2])
width_person = person_bb[2]-person_bb[0]
    height_person = person_bb[3]-person_bb[1]
center_person = np.array([person_bb[0]+width_person/2, person_bb[1]+height_person/2])
return distance
def generateColor():
"""
    randomly generate a colour
:return: random GBR color
"""
color = tuple(np.random.choice(range(256), size=3))
return tuple(int(c) for c in color)
def getSuffix(person_dir):
suffix = (person_dir.strip().split('/'))[-1]
if suffix == '':
suffix = (person_dir.strip().split('/'))[-2]
return suffix
def makeOutDir(person_dir, out_dir_path):
if out_dir_path == None:
suffix = getSuffix(person_dir)
out_dir_path = os.path.join('results/match', suffix)
if not os.path.exists(out_dir_path):
os.makedirs(out_dir_path)
return out_dir_path
def getMismatchedIndices(bboxes, aligned_indices):
"""
compute the indices of the bounding boxes
that do not appear in any of the head-person pairs (matched by the hungarian algorithm)
:param bboxes: bounding boxes
:param aligned_indices: matched indices of bounding boxes
:return: list of indices (of bounding boxes) that are not matched
"""
return [i for i in range(len(bboxes)) if i not in aligned_indices]
def drawRectangles(indices, C, head_bbs, person_bbs, image):
"""
draw head and body bounding boxes on image
:param indices: indices of the paired head bounding boxes and body bounding boxes
:param C: cost matrix
:param head_bbs: head bounding boxes
:param person_bbs: person bounding boxes
:param image: image to draw the rectangles on
"""
pair_indices = [(ind1, ind2) for ind1, ind2 in zip(indices[0], indices[1])]
for (row_ind, col_ind) in pair_indices:
if C[row_ind, col_ind] < 0:
# print('Head: ', row_ind, head_bbs[row_ind], '\nPerson: ', col_ind, person_bbs[col_ind])
color = generateColor()
cv2.rectangle(image, (head_bbs[row_ind][0], head_bbs[row_ind][1]),
(head_bbs[row_ind][2], head_bbs[row_ind][3]),
color, 2)
cv2.rectangle(image, (person_bbs[col_ind][0], person_bbs[col_ind][1]),
(person_bbs[col_ind][2], person_bbs[col_ind][3]),
color, 1)
else:
(indices[0].tolist()).remove(row_ind)
(indices[1].tolist()).remove(col_ind)
for i in getMismatchedIndices(head_bbs, indices[0]):
cv2.rectangle(image, (head_bbs[i][0], head_bbs[i][1]), (head_bbs[i][2], head_bbs[i][3]),
(0, 0, 255), 2)
for i in getMismatchedIndices(person_bbs, indices[1]):
cv2.rectangle(image, (person_bbs[i][0], person_bbs[i][1]), (person_bbs[i][2], person_bbs[i][3]),
(0, 255, 0), 1)
def getPersonBoundingBoxes(person_dir, filename):
json_data = json.load(open(os.path.join(person_dir, filename)))
detections = []
if 'detections' in json_data.keys():
detections = json_data['detections']
person_bbs = [det['bbox'] for det in detections if det['class'] == 'person']
return person_bbs
def getHeadBoundingBoxes(head_file, person_dir, filename):
heads = open(head_file, 'r').readlines()
raw_filename = (person_dir.strip().split('/'))[-1] + '/' + '.'.join((filename.strip().split('.'))[0:-1])
head_line = [line for line in heads if line.find(raw_filename) != -1]
# print(raw_filename, head_line, person_bbs)
head_bbs = []
if len(head_line) > 0: # and len(person_bbs) > 0:
head_bbs = (head_line[0].strip().split('\t'))[1:]
head_bbs = [[int(head_bbs[i]), int(head_bbs[i + 1]), int(head_bbs[i + 2]), int(head_bbs[i + 3])] for i
in range(len(head_bbs)) if i % 5 == 0]
return head_bbs
def computeAlignments(head_bbs, person_bbs):
indices = np.array([[], []])
C = np.zeros([len(head_bbs), len(person_bbs)])
if len(head_bbs) > 0 and len(person_bbs) > 0:
C = cdist(XA=np.array(head_bbs), XB=np.array(person_bbs), metric=computeIoU) # maximize
indices = linear_sum_assignment(C)
return indices, C
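# Note: linear_sum_assignment returns two index arrays (head indices, person
# indices) whose pairs minimise the summed cost in C, i.e. maximise the
# weighted head/body overlap computed by computeIoU.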
def Align(head_file, person_dir, image_dir, out_dir):
# heads = open(head_file, 'r').readlines()
# heads.extend(open(HEAD_bb_path_2, 'r').readlines())
print('Reading in files')
for filename in os.listdir(person_dir):
if filename.find('.json') != -1:
person_bbs = getPersonBoundingBoxes(person_dir, filename)
head_bbs = getHeadBoundingBoxes(head_file, person_dir, filename)
            indices, C = computeAlignments(head_bbs, person_bbs)
img_format = '.png'
if image_dir.find('HollywoodHeads')!=-1:
img_format = '.jpeg'
elif image_dir.find('MPII')!=-1:
img_format = '.jpg'
img_filename = '.'.join((filename.strip().split('.'))[0:-1]) + img_format
image = cv2.imread(os.path.join(image_dir, img_filename))
drawRectangles(indices, C, head_bbs, person_bbs, image)
print('image saved to ', os.path.join(out_dir, img_filename))
            cv2.imwrite(os.path.join(out_dir, img_filename), image)
def parseArgs(argv=None):
parser = argparse.ArgumentParser(
description='(yolact) body-head aligner')
parser.add_argument('--head',
default='results/head_bounding_boxes/train_v3.csv', type=str,
help='Path to annotated head bounding boxes csv file', required=False)
parser.add_argument('--person', default='results/person_bounding_boxes/film8', type=str,
help='Path to (yolact) directory containing person bounding boxes jsons', required=False)
parser.add_argument('--images', default='data/head_det_corpus_v3/film8', type=str,
help='Path to directory containing raw images', required=False)
parser.add_argument('--outdir', default=None, type=str,
help='Path to output directory', required=False)
global args
args = parser.parse_args(argv)
if __name__ == '__main__':
parseArgs()
HEAD_FILE = args.head
PERSON_DIR = args.person
IMAGE_DIR = args.images
OUT_DIR = makeOutDir(PERSON_DIR, args.outdir)
Align(HEAD_FILE, PERSON_DIR, IMAGE_DIR, OUT_DIR)
|
the-stack_106_29268 | from datetime import datetime, timedelta
from urllib.parse import urljoin
from isodate import parse_datetime, parse_duration
import requests
from solcast.base import Base
class RadiationEstimatedActuals(Base):
end_point = 'radiation/estimated_actuals'
def __init__(self, latitude, longitude, *args, **kwargs):
self.latitude = latitude
self.longitude = longitude
self.latest = kwargs.get('latest', False)
self.estimated_actuals = None
self.params = {'latitude' : self.latitude, 'longitude' : self.longitude}
if self.latest:
self.end_point = self.end_point + '/latest'
self._get(*args, **kwargs)
if self.ok:
self._generate_est_acts_dict()
def _generate_est_acts_dict(self):
self.estimated_actuals = []
for est_act in self.content.get('estimated_actuals'):
# Convert period_end and period. All other fields should already be
# the correct type
est_act['period_end'] = parse_datetime(est_act['period_end'])
est_act['period'] = parse_duration(est_act['period'])
self.estimated_actuals.append(est_act)
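# Minimal usage sketch (not part of the original module; coordinates are
# placeholders and API-key handling is assumed to live in the Base class):
#
#     est = RadiationEstimatedActuals(-35.0, 149.1, latest=True)
#     if est.ok:
#         for period in est.estimated_actuals:
#             print(period['period_end'], period['period'])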
|
the-stack_106_29269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Minolta light-measuring devices
See http://www.konicaminolta.com/instruments
----------
"""
from __future__ import absolute_import, print_function
from builtins import range
from builtins import object
from psychopy import logging
import struct
import sys
import time
try:
import serial
except ImportError:
serial = False
class LS100(object):
"""A class to define a Minolta LS100 (or LS110?) photometer
You need to connect a LS100 to the serial (RS232) port and
**when you turn it on press the F key** on the device. This will put
it into the correct mode to communicate with the serial port.
usage::
from psychopy.hardware import minolta
phot = minolta.LS100(port)
if phot.OK: # then we successfully made a connection
print(phot.getLum())
:parameters:
port: string
the serial port that should be checked
maxAttempts: int
If the device doesn't respond first time how many attempts
should be made? If you're certain that this is the correct
port and the device is on and correctly configured then this
could be set high. If not then set this low.
:troubleshooting:
Various messages are printed to the log regarding the function
of this device, but to see them you need to set the printing of
the log to the correct level::
from psychopy import logging
logging.console.setLevel(logging.ERROR) # error messages only
logging.console.setLevel(logging.INFO) # more info
logging.console.setLevel(logging.DEBUG) # log all communications
If you're using a keyspan adapter (at least on macOS) be aware that
it needs a driver installed. Otherwise no ports will be found.
Error messages:
``ERROR: Couldn't connect to Minolta LS100/110 on ____``:
This likely means that the device is not connected to that port
(although the port has been found and opened). Check that the
device has the `[` in the bottom right of the display;
if not turn off and on again holding the `F` key.
``ERROR: No reply from LS100``:
The port was found, the connection was made and an initial
command worked, but then the device stopped communating. If the
first measurement taken with the device after connecting does
not yield a reasonable intensity the device can sulk (not a
technical term!). The "[" on the display will disappear and you
can no longer communicate with the device. Turn it off and on
again (with F depressed) and use a reasonably bright screen for
your first measurement. Subsequent measurements can be dark
(or we really would be in trouble!!).
"""
longName = "Minolta LS100/LS110"
driverFor = ["ls110", "ls100"]
def __init__(self, port, maxAttempts=1):
super(LS100, self).__init__()
if not serial:
raise ImportError("The module serial is needed to connect to "
"photometers. On most systems this can be "
"installed with\n\t easy_install pyserial")
if type(port) in [int, float]:
# add one so that port 1=COM1
self.portNumber = port
self.portString = 'COM%i' % self.portNumber
else:
self.portString = port
self.portNumber = None
self.isOpen = 0
self.lastQual = 0
self.lastLum = None
self.type = 'LS100'
self.com = False
self.OK = True # until we fail
self.maxAttempts = maxAttempts
self.codes = {
'ER00\r\n': 'Unknown command',
'ER01\r\n': 'Setting error',
'ER11\r\n': 'Memory value error',
'ER10\r\n': 'Measuring range over',
'ER19\r\n': 'Display range over',
'ER20\r\n': 'EEPROM error (the photometer needs repair)',
'ER30\r\n': 'Photometer battery exhausted', }
# try to open the port
_linux = sys.platform.startswith('linux')
if sys.platform in ('darwin', 'win32') or _linux:
try:
self.com = serial.Serial(self.portString)
except Exception:
msg = ("Couldn't connect to port %s. Is it being used by "
"another program?")
self._error(msg % self.portString)
else:
msg = "I don't know how to handle serial ports on %s"
self._error(msg % sys.platform)
# setup the params for comms
if self.OK:
self.com.close() # not sure why this helps but on win32 it does!!
# this is a slightly odd characteristic of the Minolta LS100
self.com.bytesize = 7
self.com.baudrate = 4800
self.com.parity = serial.PARITY_EVEN # none
self.com.stopbits = serial.STOPBITS_TWO
try:
if not self.com.isOpen():
self.com.open()
except Exception:
msg = "Opened serial port %s, but couldn't connect to LS100"
self._error(msg % self.portString)
else:
self.isOpen = 1
if self.OK: # we have an open com port. try to send a command
for repN in range(self.maxAttempts):
time.sleep(0.2)
for n in range(10):
# set to use absolute measurements
reply = self.sendMessage('MDS,04')
if reply[0:2] == 'OK':
self.OK = True
break
elif reply not in self.codes:
self.OK = False
break # wasn't valid
else:
self.OK = False # false so far but keep trying
if self.OK: # we have successfully sent and read a command
logging.info("Successfully opened %s" % self.portString)
def setMode(self, mode='04'):
"""Set the mode for measurements. Returns True (success) or False
'04' means absolute measurements.
'08' = peak
'09' = cont
See user manual for other modes
"""
reply = self.sendMessage('MDS,%s' % mode)
return self.checkOK(reply)
def measure(self):
"""Measure the current luminance and set .lastLum to this value
"""
reply = self.sendMessage('MES')
if self.checkOK(reply):
lum = float(reply.split()[-1])
return lum
else:
return -1
def getLum(self):
"""Makes a measurement and returns the luminance value
"""
return self.measure()
def clearMemory(self):
"""Clear the memory of the device from previous measurements
"""
reply = self.sendMessage('CLE')
ok = self.checkOK(reply)
return ok
def checkOK(self, msg):
"""Check that the message from the photometer is OK.
If there's an error show it (printed).
Then return True (OK) or False.
"""
# also check that the reply is what was expected
if msg[0:2] != 'OK':
if msg == '':
logging.error('No reply from LS100')
sys.stdout.flush()
else:
logging.error('Error message from LS100:' + self.codes[msg])
sys.stdout.flush()
return False
else:
return True
def sendMessage(self, message, timeout=5.0):
"""Send a command to the photometer and wait an allotted
timeout for a response.
The message can be in either bytes or unicode but the returned string
will always be utf-encoded.
"""
# append a newline if necessary (for either str or bytes)
if type(message) == str:
if message[-2:] != '\r\n':
message += '\r\n'
elif type(message) == bytes:
if message[-2:] != b'\r\n':
message += b'\r\n'
# flush the read buffer first
# read as many chars as are in the buffer
self.com.read(self.com.inWaiting())
# then send message and catch any returned chars
for attemptN in range(self.maxAttempts):
# send the message
time.sleep(0.1)
if type(message) != bytes:
message = bytes(message, 'utf-8')
self.com.write(message)
self.com.flush()
time.sleep(0.1)
# get reply (within timeout limit)
self.com.timeout = timeout
# send complete message
logging.debug('Sent command:' + str(message[:-2]))
retVal = self.com.readline().decode('utf-8')
if len(retVal) > 0:
break # we got a reply so can stop trying
return retVal
def _error(self, msg):
self.OK = False
logging.error(msg)
def setMaxAttempts(self, maxAttempts):
"""Changes the number of attempts to send a message and read the
output. Typically this should be low initially, if you aren't sure
that the device is setup correctly but then, after the first
successful reading, set it higher.
"""
self.maxAttempts = maxAttempts
|
the-stack_106_29272 | import sys
from PyQt5.QtCore import Qt, QDir, QUrl
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QPushButton, QApplication
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
class janela_vencedor(QWidget):
def __init__(self, equipe_vencedora):
QWidget.__init__(self)
layout = QGridLayout()
self.setLayout(layout)
self.setFixedSize(800,400)
titulo = QLabel("CAMPEAO DO VOLBSI 2019")
titulo.setStyleSheet("color: yellow")
titulo.setFont(QFont("Courier", 36))
titulo.setAlignment(Qt.AlignHCenter)
layout.addWidget(titulo)
        campeao = QLabel(f'\\ {equipe_vencedora} /')
campeao.setStyleSheet("color: white")
campeao.setFont(QFont("Decorative", 48))
campeao.setAlignment(Qt.AlignCenter)
layout.addWidget(campeao)
self.botao_sair = QPushButton("SAIR")
self.botao_sair.setFont(QFont("Courier", 16))
self.botao_sair.clicked.connect(quit)
layout.addWidget(self.botao_sair)
self.music = '/home/Documentos/projetos/volbsi/music.mp3'
self.player = QMediaPlayer()
self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.music)))
self.player.play()
self.show()
app = QApplication(sys.argv)
screen = janela_vencedor("CORNOS FC")
screen.show()
sys.exit(app.exec_())
|
the-stack_106_29273 | from constants import *
from utils.db import connect_db
@connect_db
def add_email(db, email):
table = db[EMAILS_TABLE]
table.upsert(email, [EMAIL_KEY])
@connect_db
def remove_email(db, email):
table = db[EMAILS_TABLE]
table.delete(email=email)
@connect_db
def get_email(db, email):
table = db[EMAILS_TABLE]
row = table.find_one(email=email)
if row is not None:
return row
return None
@connect_db
def get_blocked_subscribers(db, offset, limit):
table = db[EMAILS_TABLE]
return table.find(status="blacklisted")
@connect_db
def remove_subscriber(db, uuid):
table = db[EMAILS_TABLE]
table.delete(uuid=str(uuid))
@connect_db
def get_subscriber(db, uuid):
table = db[EMAILS_TABLE]
row = table.find_one(uuid=str(uuid))
if row is not None:
return row
return None
@connect_db
def get_all_emails(db, offset, limit):
table = db[EMAILS_TABLE]
return table.find()
@connect_db
def get_all_emails_unpaginated(db):
table = db[EMAILS_TABLE]
return table.find()
@connect_db
def add_uppy_config(db, config):
table = db[UPPY_TABLE]
config["id"] = 1
table.upsert(config, ["id"])
@connect_db
def get_uppy_config(db):
table = db[UPPY_TABLE]
row = table.find_one(id=1)
if row is not None:
return row
return None
@connect_db
def add_campaign(db, campaign):
table = db[CAMPAIGNS_TABLE]
campaign[UUID_KEY] = str(campaign[UUID_KEY])
campaign[TEMPLATE_KEY] = str(campaign[TEMPLATE_KEY])
table.upsert(campaign, [UUID_KEY])
@connect_db
def get_campaign(db, uuid):
table = db[CAMPAIGNS_TABLE]
row = table.find_one(uuid=str(uuid))
if row is not None:
return row
return None
@connect_db
def get_all_campaigns(db, offset, limit):
table = db[CAMPAIGNS_TABLE]
return table.find()
@connect_db
def get_all_campaigns_unpaginated(db):
table = db[CAMPAIGNS_TABLE]
return table.find()
@connect_db
def get_campaign_count(db):
return len(db[CAMPAIGNS_TABLE])
@connect_db
def add_campaign_config(db, config):
table = db[CONFIG_TABLE]
config[UUID_KEY] = str(config[UUID_KEY])
table.upsert(config, [UUID_KEY])
@connect_db
def remove_campaign_config(db, uuid):
table = db[CONFIG_TABLE]
table.delete(uuid=str(uuid))
@connect_db
def get_campaign_config(db, uuid):
table = db[CONFIG_TABLE]
row = table.find_one(uuid=str(uuid))
if row is not None:
return row
return None
@connect_db
def get_campaign_config_default(db):
table = db[CONFIG_TABLE]
    # assumption: the default config is stored under a fixed row id, mirroring
    # the uppy config convention above
    row = table.find_one(id=1)
if row is not None:
return row
return None
@connect_db
def get_all_campaign_configs(db, offset, limit):
table = db[CONFIG_TABLE]
return table.find()
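# Minimal usage sketch (not part of the original module; assumes connect_db
# injects an open `dataset`-style db handle as the first argument and that
# EMAIL_KEY names the "email" column used for upserts):
#
#     add_email({"email": "user@example.com", "status": "active"})
#     row = get_email("user@example.com")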
|
the-stack_106_29275 | import os
import sys
import shutil
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
"""
Key Value writer
"""
def writekvs(self, kvs):
"""
write a dictionary to file
:param kvs: (dict)
"""
raise NotImplementedError
class SeqWriter(object):
"""
sequence writer
"""
def writeseq(self, seq):
"""
write an array to file
:param seq: (list)
"""
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
"""
log to a file, in a human readable format
:param filename_or_file: (str or File) the file to write the log to
"""
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'write'), 'Expected file or str, got {}'.format(filename_or_file)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
warnings.warn('Tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
@classmethod
def _truncate(cls, string):
return string[:20] + '...' if len(string) > 23 else string
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
"""
closes the file
"""
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
"""
log to a file, in the JSON format
:param filename: (str) the file to write the log to
"""
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for key, value in sorted(kvs.items()):
if hasattr(value, 'dtype'):
if value.shape == () or len(value) == 1:
# if value is a dimensionless numpy array or of length 1, serialize as a float
kvs[key] = float(value)
else:
# otherwise, a value is a numpy array, serialize as a list or nested lists
kvs[key] = value.tolist()
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
"""
closes the file
"""
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
"""
log to a file, in a CSV format
:param filename: (str) the file to write the log to
"""
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, key) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(key)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for i, key in enumerate(self.keys):
if i > 0:
self.file.write(',')
value = kvs.get(key)
if value is not None:
self.file.write(str(value))
self.file.write('\n')
self.file.flush()
def close(self):
"""
closes the file
"""
self.file.close()
def summary_val(key, value):
"""
:param key: (str)
:param value: (float)
"""
kwargs = {'tag': key, 'simple_value': float(value)}
return tf.Summary.Value(**kwargs)
def valid_float_value(value):
"""
Returns True if the value can be successfully cast into a float
:param value: (Any) the value to check
:return: (bool)
"""
try:
float(value)
return True
except TypeError:
return False
class TensorBoardOutputFormat(KVWriter):
def __init__(self, folder):
"""
Dumps key/value pairs into TensorBoard's numeric format.
:param folder: (str) the folder to write the log to
"""
os.makedirs(folder, exist_ok=True)
self.dir = folder
self.step = 1
prefix = 'events'
path = os.path.join(os.path.abspath(folder), prefix)
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
summary = tf.Summary(value=[summary_val(k, v) for k, v in kvs.items() if valid_float_value(v)])
event = event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
"""
closes the file
"""
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(_format, ev_dir, log_suffix=''):
"""
return a logger for the requested format
:param _format: (str) the requested format to log to ('stdout', 'log', 'json', 'csv' or 'tensorboard')
:param ev_dir: (str) the logging directory
:param log_suffix: (str) the suffix for the log file
:return: (KVWrite) the logger
"""
os.makedirs(ev_dir, exist_ok=True)
if _format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif _format == 'log':
return HumanOutputFormat(os.path.join(ev_dir, 'log%s.txt' % log_suffix))
elif _format == 'json':
return JSONOutputFormat(os.path.join(ev_dir, 'progress%s.json' % log_suffix))
elif _format == 'csv':
return CSVOutputFormat(os.path.join(ev_dir, 'progress%s.csv' % log_suffix))
elif _format == 'tensorboard':
return TensorBoardOutputFormat(os.path.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (_format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
:param key: (Any) save to log this key
:param val: (Any) save to log this value
"""
Logger.CURRENT.logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
:param key: (Any) save to log this key
:param val: (Number) save to log this value
"""
Logger.CURRENT.logkv_mean(key, val)
def logkvs(key_values):
"""
Log a dictionary of key-value pairs
:param key_values: (dict) the list of keys and values to save to log
"""
for key, value in key_values.items():
logkv(key, value)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
Logger.CURRENT.dumpkvs()
def getkvs():
"""
get the key values logs
:return: (dict) the logged values
"""
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
:param args: (list) log the arguments
:param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the DEBUG level.
:param args: (list) log the arguments
"""
log(*args, level=DEBUG)
def info(*args):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the INFO level.
:param args: (list) log the arguments
"""
log(*args, level=INFO)
def warn(*args):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the WARN level.
:param args: (list) log the arguments
"""
log(*args, level=WARN)
def error(*args):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the ERROR level.
:param args: (list) log the arguments
"""
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
:param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
Logger.CURRENT.set_level(level)
def get_level():
"""
Get logging threshold on current logger.
:return: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
return Logger.CURRENT.level
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
:return: (str) the logging directory
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
class ProfileKV:
def __init__(self, name):
"""
Usage:
with logger.ProfileKV("interesting_scope"):
code
:param name: (str) the profiling name
"""
self.name = "wait_" + name
def __enter__(self):
self.start_time = time.time()
def __exit__(self, _type, value, traceback):
Logger.CURRENT.name2val[self.name] += time.time() - self.start_time
def profile(name):
"""
Usage:
@profile("my_func")
def my_func(): code
:param name: (str) the profiling name
:return: (function) the wrapped function
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(name):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
class Logger(object):
# A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
DEFAULT = None
CURRENT = None # Current logger being used by the free functions above
def __init__(self, folder, output_formats):
"""
the logger class
:param folder: (str) the logging location
:param output_formats: ([str]) the list of output format
"""
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = folder
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
:param key: (Any) save to log this key
:param val: (Any) save to log this value
"""
self.name2val[key] = val
def logkv_mean(self, key, val):
"""
The same as logkv(), but if called many times, values averaged.
:param key: (Any) save to log this key
:param val: (Number) save to log this value
"""
if val is None:
self.name2val[key] = None
return
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
"""
Write all of the diagnostics from the current iteration
"""
if self.level == DISABLED:
return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
:param args: (list) log the arguments
:param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
"""
Set logging threshold on current logger.
:param level: (int) the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
self.level = level
def get_dir(self):
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
:return: (str) the logging directory
"""
return self.dir
def close(self):
"""
closes the file
"""
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
"""
log to the requested format outputs
:param args: (list) the arguments to log
"""
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(folder=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(folder=None, format_strs=None):
"""
configure the current logger
:param folder: (str) the save location (if None, $OPENAI_LOGDIR, if still None, tempdir/openai-[date & time])
:param format_strs: (list) the output logging format
(if None, $OPENAI_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])
"""
if folder is None:
folder = os.getenv('OPENAI_LOGDIR')
if folder is None:
folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(folder, str)
os.makedirs(folder, exist_ok=True)
log_suffix = ''
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if rank > 0:
log_suffix = "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, folder, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(folder=folder, output_formats=output_formats)
log('Logging to %s' % folder)
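# Minimal usage sketch (mirrors the public API above and the _demo() function
# further below; the folder path is a placeholder):
#
#     configure(folder="/tmp/my_run", format_strs=["stdout", "csv"])
#     logkv("loss", 0.42)
#     dumpkvs()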
def reset():
"""
reset the current logger
"""
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class ScopedConfigure(object):
def __init__(self, folder=None, format_strs=None):
"""
Class for using context manager while logging
usage:
with ScopedConfigure(folder=None, format_strs=None):
{code}
:param folder: (str) the logging folder
:param format_strs: ([str]) the list of output logging format
"""
self.dir = folder
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(folder=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
"""
tests for the logger module
"""
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
folder = "/tmp/testlogging"
if os.path.exists(folder):
shutil.rmtree(folder)
configure(folder=folder)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
with ScopedConfigure(None, None):
info("^^^ should see b = 33.3")
with ScopedConfigure("/tmp/test-logger/", ["json"]):
logkv("b", -2.5)
dumpkvs()
reset()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
warn("hey")
error("oh")
logkvs({"test": 1})
# ================================================================
# Readers
# ================================================================
def read_json(fname):
"""
read a json file using pandas
:param fname: (str) the file path to read
:return: (pandas DataFrame) the data in the json
"""
import pandas
data = []
with open(fname, 'rt') as file_handler:
for line in file_handler:
data.append(json.loads(line))
return pandas.DataFrame(data)
def read_csv(fname):
"""
read a csv file using pandas
:param fname: (str) the file path to read
:return: (pandas DataFrame) the data in the csv
"""
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
read a tensorboard output
:param path: (str) a tensorboard file OR a directory, where we will find all TB files of the form events.
:return: (pandas DataFrame) the tensorboad data
"""
import pandas
import numpy as np
from glob import glob
# from collections import defaultdict
import tensorflow as tf
if os.path.isdir(path):
fnames = glob(os.path.join(path, "events.*"))
elif os.path.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for value in summary.summary.value:
pair = (summary.step, value.simple_value)
tag2pairs[value.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step - 1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
|
the-stack_106_29276 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Address: http://www.runoob.com/python/python-exercise-example85.html
def func(num):
j = 1
sum = 9
m = 9
flag = True
while flag:
if sum % num == 0:
print(sum)
flag = False
else:
m *= 10
sum += m
j += 1
print("%d 个 9 可以被 %d 整除 : %d" % (j, num, sum))
r = sum / num
print("%d / %d = %d" % (sum, num, r))
# Given an odd number, find how many 9s (999...9) are needed until the number is divisible by it
if __name__ == "__main__":
num = 21
# num = int(input("请输入一个正奇数:"))
if num > 0 and num % 2 == 1:
func(num)
else:
print("输入错误")
|
the-stack_106_29280 | # circulaRprint #
# Circular Plotter controller for Raspberry Pi #
# Author: Oehrly, 2018 #
#################################################
#
# SegmentAccelerationPlanner and StepAccelerationPlanner
# The SegmentAP manages the acceleration from segment to segment.
# The StepAP manages the per step acceleration within a segment.
#
# Acknowledgement:
# ... and may somebody else help you back on track when you're lost.
# Thank you to Grbl for inspiration and to the one who wrote the extensive and useful comments.
# for comparison see https://github.com/gnea/grbl/blob/master/grbl/planner.c
# Calculations heavily rely on the fact that every PathSegment contains ONE (!) Beta axis step at max.
# This is guaranteed by the way the XYPlaner handles path calculations.
from math import sqrt, radians, ceil
from datastructures import PathSegment
class SegmentAccelerationPlanner:
# Calculates the acceleration plan based on the following guidelines:
#
# 1. No junction speed may be higher than the specified junction speed limits or the entry/exit
# speeds of neighbouring segments.
# 2. The velocity difference between entry and exit speed of one segment has to be within the
# allowable acceleration/deceleration limits for the head and all axes.
# 3. Every segment should run at the acceleration limit so the speed is as high as possible
# within the limits.
#
# Therefore the planner goes over every segment first in reverse order always making sure that the deceleration is
# always at the maximum limit for reaching the entry speed of the following segment.
# The last segment is always planned from a full stop to ensure that deceleration to a full stop is always possible.
#
# After that the planner goes over every segment in chronological (forward) order to ensure that no acceleration is
# higher than allowed. It dials down the entry speeds of the next segments accordingly.
#
# To increase computational efficiency the planner checks during the forward pass up to which index of the buffer
    # the acceleration is already optimally planned. An indicator is set to this index. On the next go the planner only
# calculates from the back of the buffer up to that index and back.
#
# If the buffer length is set too low, it can happen that the maximum speed is never reached as the total distance
# in the buffer is not long enough to accelerate to the maximum speed.
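    #
    # The entry/exit speed updates in recalculate_segment_buffer() use the
    # constant-acceleration relation v_exit^2 = v_entry^2 + 2 * a * d,
    # applied backwards over each segment in the reverse pass and forwards
    # in the forward pass.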
def __init__(self, config, provider):
# load data from configuration
self.FEED_RATE_MM = float(config['feed_rate'])
self.ACCELERATION = float(config['acceleration'])
self.MINIMUM_SPEED = float(config['minimum_feed_rate'])
self.JUNCTION_DEVIATION = float(config['junction_deviation'])
self.BETA_RADIUS = float(config['rB'])
self.ALPHA_RES = float(radians(config['alpha_res']))
self.BETA_RES = float(radians(config['beta_res']))
self.ALPHA_ACCELERATION = float(config['a_acceleration'])
self.BETA_ACCELERATION = float(config['b_acceleration'])
# load max axis feed rates and convert from mm/sec to degree/sec
self.ALPHA_FEED_RATE = float(config['a_feed_rate']) * self.ALPHA_RES
self.BETA_FEED_RATE = float(config['b_feed_rate']) * self.BETA_RES
self.Z_DELAY = 1 / float(config['z_feed_rate'])
# calculate maximum beta acceleration in mm/sec^2 from steps/sec^2
# this is calculated independently per segment for alpha,
# as the alpha radius changes depending on the current position
self.BETA_ACCELERATION_MM = self.BETA_ACCELERATION * self.BETA_RADIUS * self.BETA_RES
self.previous_nominal_speed = 0
self.previous_segment_x = 0
self.previous_segment_y = 0
self.previous_x_unit_vec = 0
self.previous_y_unit_vec = 0
self.previous_alpha = 0
self.previous_beta = 0
# the processing buffer is used to store the segments while they are used for calculations
# testing revealed that a list seems to be fastest in the current implementation
# an experimental ringbuffer was faster shifting the data through,
# but it had to much overhead for iterations through the whole buffer
self.processing_buffer = list()
self.buffer_length = int(config['SegAP_buffer_length'])
# buffer_planned indicates up to which index the buffer is already calculated optimally
# After recalculation is done, all segments which are already optimal are deleted from the
# processing buffer. After that the buffer is topped up again.
self.buffer_planned = 0
# completed segments stores segments which are already fully calculated until they are requested
self.completed_segments = list()
self.bypass = list()
self.last_segment_index = int(0)
self.provider = provider # class which provides new segments
def serve_next(self):
if self.bypass and self.bypass[0].index == self.last_segment_index + 1:
self.last_segment_index = self.bypass[0].index
return self.bypass.pop(0)
if self.completed_segments:
self.last_segment_index = self.completed_segments[0].index
return self.completed_segments.pop(0)
else:
while len(self.processing_buffer) < self.buffer_length:
segment = self.provider.serve_next()
if not segment:
break
segment, go_on = self.initiate_segment(segment)
if go_on:
self.processing_buffer.append(segment)
else:
if segment.index == self.last_segment_index + 1:
self.last_segment_index = segment.index
return segment
else:
self.bypass.append(segment)
if not self.processing_buffer:
return None
if len(self.processing_buffer) > 1:
self.recalculate_segment_buffer()
if self.buffer_planned < 1:
self.last_segment_index = self.processing_buffer[0].index
ret = self.processing_buffer.pop(0)
return ret
else:
self.completed_segments = self.processing_buffer[:self.buffer_planned + 1]
del self.processing_buffer[:self.buffer_planned + 1]
self.buffer_planned = 1
self.last_segment_index = self.completed_segments[0].index
return self.completed_segments.pop(0)
def recalculate_segment_buffer(self):
# reverse pass through the processing buffer
# always starts with an empty segment at the end to represent a stop
current_seg = PathSegment(0, 0, 0, 0, 0, 0, 0, 0, None, None)
for idx in range(-1, -len(self.processing_buffer) + self.buffer_planned, -1):
next_seg = current_seg
current_seg = self.processing_buffer[idx]
# calculate the maximum entry speed by decelerating over the current segment
# (mathematically we accelerate in reverse)
entry_speed_sqr = next_seg.entry_speed_sqr + 2 * current_seg.acceleration * current_seg.distance
if entry_speed_sqr < current_seg.max_entry_speed_sqr:
current_seg.entry_speed_sqr = entry_speed_sqr
else:
current_seg.entry_speed_sqr = current_seg.max_entry_speed_sqr
# forward_pass
next_seg = self.processing_buffer[self.buffer_planned]
for idx in range(self.buffer_planned + 1, len(self.processing_buffer), 1):
current_seg = next_seg
next_seg = self.processing_buffer[idx]
# calculate maximum entry speed by accelerating over the current segment
if current_seg.entry_speed_sqr < next_seg.entry_speed_sqr:
entry_speed_sqr = current_seg.entry_speed_sqr + 2 * current_seg.acceleration * current_seg.distance
if entry_speed_sqr < next_seg.entry_speed_sqr:
next_seg.entry_speed_sqr = entry_speed_sqr
# the current segment is at maximum acceleration, the buffer can not be improved anymore up to here
# the buffer_planned indicator gets set to the new position
self.buffer_planned = idx - 2
def limit_feed_rate_by_axis_limit(self, segment):
# limits the feed rate for this segment to the lowest value of head, alpha and beta
# calculate the minimum duration for the segment based on how far the axes
# need to move and their maximum speeds (time = distance / velocity)
alpha_dif = abs(self.previous_alpha - segment.alpha)
t_min_a = alpha_dif / self.ALPHA_FEED_RATE
beta_dif = abs(self.previous_beta - segment.beta)
t_min_b = beta_dif / self.BETA_FEED_RATE
# calculate maximum feed rate in mm/sec (velocity = distance / time)
axis_feed_limit_mm = segment.distance / max(t_min_a, t_min_b)
return min(self.FEED_RATE_MM, axis_feed_limit_mm)
def limit_acceleration_by_axis_limit(self, segment):
# limits the acceleration in this segment to the axes' maximum and the head's maximum
# head acceleration is always relevant so it is added to the list of possible limits
# by default
acceleration_values = [self.ACCELERATION]
# The alpha axis acceleration limit is set in steps/sec^2. This corresponds to a different amount
# of mm/sec^2 depending on the distance of the current position to the table's center.
# Therefore the acceleration limit in mm/sec^2 needs to be calculated for every segment
# if the alpha axis needs to move.
# In theory it needs to be calculated for every step but per segment should be exact enough.
if segment.a_steps:
alpha_dif = abs(self.previous_alpha - segment.alpha)
distance_to_centre = sqrt(segment.x ** 2 + segment.y ** 2) # TODO set value in PathPlanner
distance_per_alpha_step = (distance_to_centre * alpha_dif) / abs(segment.a_steps)
alpha_acceleration_mm = self.ALPHA_ACCELERATION * distance_per_alpha_step # mm/s^2 = steps/s^2 * mm/step
acceleration_values.append(alpha_acceleration_mm)
# If the beta axis needs to move, the precalculated acceleration limit is added
# to the list of possible values
if segment.b_steps:
acceleration_values.append(self.BETA_ACCELERATION_MM)
return min(acceleration_values)
def initiate_segment(self, segment):
# All necessary values for further calculations are set here.
# After this the segment can be loaded into the processing buffer.
#
        # Technically some of these calculations are only correct for Cartesian printers.
        # However, since one segment normally covers only a very short distance,
        # the result should be approximately right (over very short distances the movement is almost linear).
# set z axis delay
segment.z_delay = self.Z_DELAY
# calculate distance covered by segment
x_distance = segment.x - self.previous_segment_x
y_distance = segment.y - self.previous_segment_y
segment.distance = sqrt(x_distance ** 2 + y_distance ** 2)
if not segment.a_steps and not segment.b_steps:
return segment, False # filter out zero-length segments
# calculate unit vectors
segment.x_unit_vector = x_distance / segment.distance
segment.y_unit_vector = y_distance / segment.distance
# calculate feedrate and acceleration
# TODO: add G00/G01 differences here
segment.nominal_speed = self.limit_feed_rate_by_axis_limit(segment)
segment.acceleration = self.limit_acceleration_by_axis_limit(segment)
# calculate angle between previous and current path
# the angle is the dot product of the two vectors
# the calculation of the dot product is simplified through the usage of the unit vectors
junction_cos_theta = -self.previous_x_unit_vec * segment.x_unit_vector - self.previous_y_unit_vec * segment.y_unit_vector
if junction_cos_theta > 0.999999:
# angle is 0 degrees i.e. the path makes a full turn
segment.max_junction_speed_sqr = self.MINIMUM_SPEED ** 2
elif junction_cos_theta < -0.999999:
# angle is 180 degrees i.e. the junction is a straight line
segment.max_junction_speed_sqr = float('inf')
else:
sin_theta_d2 = sqrt(0.5 * (1.0 - junction_cos_theta))
            # Trig half-angle identity: sin(theta/2) = sqrt((1 - cos(theta)) / 2); always positive. Taken from Grbl's junction-deviation model.
# TODO: segment.acceleration is better replaced with junction_acceleration (see grbl)
segment.max_junction_speed_sqr = max((self.MINIMUM_SPEED ** 2,
(segment.acceleration * self.JUNCTION_DEVIATION * sin_theta_d2) /
(1 - sin_theta_d2)
))
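        # For reference, a sketch of the Grbl-style junction-deviation model used above
        # (assuming the head follows a circular arc tangent to both segments, which is
        # allowed to deviate from the corner by at most JUNCTION_DEVIATION d):
        #   R = d * sin(theta/2) / (1 - sin(theta/2))
        #   v_junction^2 = acceleration * R
        # which is exactly the expression assigned to max_junction_speed_sqr.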
# calculate max entry speed
if segment.nominal_speed > self.previous_nominal_speed:
segment.max_entry_speed_sqr = self.previous_nominal_speed ** 2
else:
segment.max_entry_speed_sqr = segment.nominal_speed ** 2
if segment.max_entry_speed_sqr > segment.max_junction_speed_sqr:
segment.max_entry_speed_sqr = segment.max_junction_speed_sqr
# these variables are needed as a reference back to the current segment when calculating the next one
self.previous_nominal_speed = segment.nominal_speed
self.previous_x_unit_vec = segment.x_unit_vector
self.previous_y_unit_vec = segment.y_unit_vector
self.previous_segment_x = segment.x
self.previous_segment_y = segment.y
self.previous_alpha = segment.alpha
self.previous_beta = segment.beta
return segment, True
class StepAccelerationPlanner:
# Calculates the per step timing from segment entry and exit speeds
# Goals are:
# 1. Run both axis at as high as possible speeds for as long as possible
# 2. Stay within all acceleration limits
#
# The planner accelerates/decelerates or cruises the axes so that the required exit speeds are reached.
# This should always be possible within the per axis acceleration limits, as these are taken into account
# when calculating the per segment acceleration in SegmentAccelerationPlanner.
    # If there are enough steps to accelerate higher than the exit speed and decelerate again in time,
# the planner will do so to maximize running speeds.
def __init__(self, config, provider):
# Load data from configuration file
self.ALPHA_MAX_RATE = float(config['a_feed_rate'])
self.BETA_MAX_RATE = float(config['b_feed_rate'])
self.ALPHA_ACCELERATION = float(config['a_acceleration'])
self.BETA_ACCELERATION = float(config['b_acceleration'])
self.MINIMUM_RATE = float(config['minimum_axis_feed_rate'])
# current and next segment; the current one is calculated,
# the next one is needed as reference for the exit speed
self.current = None
self.next = None
self.bypass = list()
self.last_segment_index = 0
# current per axis step rates in steps/second
self.rate_a = float(0)
self.rate_b = float(0)
self.provider = provider
def serve_next(self):
        while True:  # loop until a segment can be returned
if self.bypass and self.bypass[0].index == self.last_segment_index + 1:
self.last_segment_index = self.bypass[0].index
return self.bypass.pop(0)
while not self.next:
# should only happen on first call and maybe next few
# while there has not been a usable segment for self.next yet
next_segment = self.provider.serve_next()
if next_segment.a_steps and next_segment.b_steps:
self.next = next_segment
else:
self.last_segment_index = next_segment.index
return next_segment
segment = self.provider.serve_next()
if not segment:
segment = PathSegment(0, 0, 0, 0, 0, 0, 0, 0, None, None)
elif not segment.a_steps and not segment.b_steps:
self.bypass.append(segment)
continue
self.current = self.next
self.next = segment
current_max_rate_a, current_max_rate_b = self.nominal_rate(self.current)
next_max_entry_rate_a, next_max_entry_rate_b = self.max_entry_rate(self.next)
self.accelerate_a(self.current, current_max_rate_a, next_max_entry_rate_a)
self.accelerate_b(self.current, current_max_rate_b, next_max_entry_rate_b)
self.last_segment_index = self.current.index
return self.current
def max_entry_rate(self, segment):
# calculate the maximum entry step rate (steps/sec) for each axis in this segment
# the calculation assumes the whole segment would be run at its entry speed (which is not really the case)
# but it gives a theoretical segment duration which allows to calculate the entry rate
if segment.entry_speed_sqr:
t = segment.distance / sqrt(segment.entry_speed_sqr)
max_entry_rate_a = abs(segment.a_steps) / t
max_entry_rate_b = abs(segment.b_steps) / t
return max(max_entry_rate_a, self.MINIMUM_RATE), max(max_entry_rate_b, self.MINIMUM_RATE)
        # Return a low but non-zero rate.
        # If the calculation returned 0 for a 0 entry speed, the printer would pause forever.
        # If the returned value is too low, the printer no longer runs smoothly at slow speeds;
        # too high a value will not let the printer go slow enough and also results in non-smooth runs.
return self.MINIMUM_RATE, self.MINIMUM_RATE
def nominal_rate(self, segment):
# calculate the nominal (i.e. maximum) step rate per axis for this segment
# the calculation assumes the whole segment is run at its nominal speed. This results in a minimum
# segment duration which allows to calculate the maximum step rate
if segment.nominal_speed:
t = segment.distance / segment.nominal_speed
max_rate_a = abs(segment.a_steps) / t
max_rate_b = abs(segment.b_steps) / t
return max(max_rate_a, self.MINIMUM_RATE), max(max_rate_b, self.MINIMUM_RATE)
# return a low but non-zero rate
# same reasoning as for max_entry_rate() above
return self.MINIMUM_RATE, self.MINIMUM_RATE
def accelerate_a(self, segment, max_rate_a, exit_rate_a):
# this function handles acceleration, cruising and deceleration for the alpha axis within one segment
# possible scenarios are:
# acc
# acc acc cruise cruise cruise decel
# only cruise only decel decel only
# / ---- -------------- ---------- ---- \
# / / / \ \ \
# / / / \ \ \
#
# for acc/cruise/decel the entry speed can be lower/equal/higher than the exit speed
#
# reverse counting the number of steps gives us the remaining number of steps for this segment
# i.e. n is the number of steps remaining in this segment
for remaining in range(abs(segment.a_steps), 0, -1):
v_dif_a = self.rate_a - exit_rate_a
# if slower than exit speed, always accelerate
if v_dif_a < 0:
self.rate_a += sqrt(2 * self.ALPHA_ACCELERATION)
# check that neither current maximum nor next maximum entry speed are exceeded
if self.rate_a > max_rate_a:
self.rate_a = max_rate_a
if self.rate_a > exit_rate_a and remaining == 1:
self.rate_a = exit_rate_a
# if faster than or equal to exit speed
else:
# calculate the number of steps it takes to decelerate from current rate to exit_rate
breaking_dist = ceil((v_dif_a ** 2) / (2 * self.ALPHA_ACCELERATION))
# accelerate if there are enough steps left for decelerating within the acceleration limit
# and current step rate is below max rate
# check against breaking_dist + 1 is necessary because one step of acceleration also requires
# one more step of deceleration than was required before accelerating
if breaking_dist + 1 < remaining and self.rate_a < max_rate_a:
self.rate_a += sqrt(2 * self.ALPHA_ACCELERATION)
# check that current maximum rate is not exceeded
if self.rate_a > max_rate_a:
self.rate_a = max_rate_a
# decelerate now
elif breaking_dist + 1 >= remaining:
self.rate_a -= sqrt(2 * self.ALPHA_ACCELERATION)
# don't decelerate to a slower speed than required
if self.rate_a < exit_rate_a:
self.rate_a = exit_rate_a
            # if none of the conditions above is met, self.rate_a is left unchanged (i.e. cruise)
            # TODO: verify whether this case can actually occur
segment.a_timing.append(1 / self.rate_a)
def accelerate_b(self, segment, max_rate_b, exit_rate_b):
# Handles acceleration/cruising/deceleration for the beta axis
# As each path segment may contain only one beta step, only one of these is possible per segment
# do nothing if there is no b step
if not segment.b_steps:
return
v_dif_b = self.rate_b - exit_rate_b
# accelerate, current speed is slower than exit speed
if v_dif_b < 0:
self.rate_b += sqrt(2 * self.BETA_ACCELERATION)
# check that neither current maximum nor next maximum entry speed are exceeded
if self.rate_b > max_rate_b:
self.rate_b = max_rate_b
if self.rate_b > exit_rate_b:
self.rate_b = exit_rate_b
# decelerate, faster than exit speed
elif v_dif_b > 0:
self.rate_b -= sqrt(2 * self.BETA_ACCELERATION)
# don't decelerate to a slower speed than required
if self.rate_b < exit_rate_b:
self.rate_b = exit_rate_b
# if none of the above two conditions is met, the axis is already running at the desired speed
b_delay = 1 / self.rate_b
segment.b_timing.append(b_delay)
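
# Minimal usage sketch (hypothetical config keys and neighbouring classes): the planners
# form a pipeline in which each stage pulls segments from the previous one via
# serve_next(), so a stepper driver only ever talks to the last stage, e.g.
#
#   segment_planner = SegmentAccelerationPlanner(config, path_planner)
#   step_planner = StepAccelerationPlanner(config, segment_planner)
#   segment = step_planner.serve_next()  # segment.a_timing / segment.b_timing hold per-step delays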
|
the-stack_106_29282 | import argparse
import datetime
import json
import logging
import os
import os.path
import random
import sys
import time
import urllib.request
from mastodon import Mastodon
import tracery
from tracery.modifiers import base_english
logging.basicConfig(level=logging.INFO)
class Config:
def __init__(self, path):
self.path = os.path.abspath(os.path.expanduser(path))
with open(self.path) as f:
self.from_dict(json.load(f))
def from_dict(self, json):
self.base_url = json['base_url']
self.client_id = json['client_id']
self.client_secret = json['client_secret']
self.access_token = json['access_token']
self.post_interval = json['post_interval']
self.grammar_file = json['grammar_file']
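
# An example config file (hypothetical values) must provide every key read in
# Config.from_dict above:
#
#   {
#       "base_url": "https://mastodon.example",
#       "client_id": "...",
#       "client_secret": "...",
#       "access_token": "...",
#       "post_interval": 60,
#       "grammar_file": "grammar.json"
#   }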
def get_api(config):
return Mastodon(client_id=config.client_id,
client_secret=config.client_secret,
api_base_url=config.base_url,
access_token=config.access_token)
class TraceryBot:
def __init__(self, config):
self.config = config
self.api = get_api(self.config)
with open(self.config.grammar_file) as f:
self.grammar = tracery.Grammar(json.load(f))
self.grammar.add_modifiers(base_english)
self.last_notification = -1
def handle_notifications(self):
try:
notifications = self.api.notifications()
except Exception as e:
logging.error('Exception while fetching notifications: %s', e)
return
if isinstance(notifications, dict) and ('error' in notifications):
raise Exception('API error: {}'.format(notifications['error']))
if self.last_notification == -1:
# if we've just started running, don't autorespond
# retroactively
if len(notifications) > 0:
self.last_notification = int(notifications[0]['id'])
logging.debug('Ignoring previous notifications up to %d', self.last_notification)
else:
self.last_notification = 0
else:
# reversed order to process notification in chronological order
for notification in notifications[::-1]:
if int(notification['id']) <= self.last_notification:
continue
if notification['type'] != 'mention':
continue
logging.debug('Handling notification %s', notification['id'])
self.last_notification = int(notification['id'])
sender = notification['status']['account']['acct']
reply_attempts_remaining = 10
while reply_attempts_remaining:
reply = '@{} {}'.format(
sender,
self.grammar.flatten("#reply#"))
if len(reply) <= 500:
break
reply_attempts_remaining -= 1
if reply_attempts_remaining == 0:
logging.debug("Couldn't generate reply to notification %s", notification['id'])
return
reply_sent = self.api.status_post(reply,
in_reply_to_id=notification['status']['id'])
logging.info('Responded to status %s from %s',
notification['status']['id'],
notification['status']['account']['acct'])
def post_toot(self):
attempts_remaining = 10
while attempts_remaining:
toot = self.grammar.flatten("#toot#")
if len(toot) <= 500:
break
attempts_remaining -= 1
if attempts_remaining == 0:
logging.debug("Couldn't generate toot")
return
self.api.status_post(toot, visibility='public')
def run(self):
countdown = 0
while True:
if countdown <= 0:
self.post_toot()
countdown = self.config.post_interval
countdown -= 1
self.handle_notifications()
time.sleep(60)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='File to load the config from.',
default='config.json')
args = parser.parse_args()
config = Config(args.config)
bot = TraceryBot(config)
bot.run()
if __name__ == '__main__':
main()
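
# A minimal tracery grammar file (hypothetical example): it must define the
# "toot" and "reply" rules flattened above, e.g.
#
#   {
#       "toot": ["#greeting#, fediverse!"],
#       "reply": ["#greeting#!"],
#       "greeting": ["hello", "hi", "hey"]
#   }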
|
the-stack_106_29283 | import pkg_resources
from mako.lookup import TemplateLookup
from bravado_types.config import Config
from bravado_types.data_model import SpecInfo
from bravado_types.metadata import Metadata
def render(metadata: Metadata, spec: SpecInfo, config: Config) -> None:
"""
Render module and stub files for a given Swagger schema.
:param metadata: Code generation metadata.
:param spec: SpecInfo representing the schema.
:param config: Code generation configuration.
"""
template_dirs = []
if config.custom_templates_dir:
template_dirs.append(config.custom_templates_dir)
template_dirs.append(
pkg_resources.resource_filename(__name__, "templates/"))
lookup = TemplateLookup(directories=template_dirs)
py_template = lookup.get_template("module.py.mako")
with open(config.py_path, "w") as f:
f.write(py_template.render(metadata=metadata, spec=spec,
config=config))
pyi_template = lookup.get_template("module.pyi.mako")
with open(config.pyi_path, "w") as f:
f.write(pyi_template.render(metadata=metadata, spec=spec,
config=config))
if config.postprocessor:
config.postprocessor(config.py_path, config.pyi_path)
|
the-stack_106_29286 | import pytest
import torch
from deepspeech.models import DeepSpeech2
@pytest.fixture
def model():
return DeepSpeech2()
def test_load_state_dict_restores_parameters(model):
act_model = DeepSpeech2()
act_model.load_state_dict(model.state_dict())
# Naive equality check: all network parameters are equal.
exp_params = dict(model.network.named_parameters())
print(exp_params.keys())
for act_name, act_param in act_model.network.named_parameters():
assert torch.allclose(act_param.float(), exp_params[act_name].float())
|
the-stack_106_29288 | import logging
import requests
import pickle
import os
import json
import urllib
from math import sin, cos, sqrt, atan2, radians
log = logging.getLogger(__name__)
# Telegram tokens - see https://www.mariansauter.de/2018/01/send-telegram-notifications-to-your-mobile-from-python-opensesame/
#
bot_token = 'your token'
bot_chatID = 'your chat id'
# Constants
#
# average ICE g/km
ice_emissions = 120.1
# Average UK g/kWh
# electric_emissions = 225.0
# Octopus g/kWh
electric_emissions = 0.0
# max range
#
wltp = 279
# FIX THIS - add in SOH to calcs
# list of non-standard chargers
#
chargers = [
{'latitude':0, 'longitude':0, 'msg':'At home, '+'5p/KWh overnight'}
]
"""
Poll to see if the car is being charged. If so:
1. Disable auto sleep whilst charging
2. Send Telegram message when charging starts
   - Estimated miles per kWh (based on miles travelled and delta Accumulative Charge Power)
   - Estimated CO2 saved on fuel since last charged (based on ICE emissions and electricity supplier)
   - Estimated total CO2 saved since purchase (based on total mileage and ICE emissions)
   - Details of nearest charger (i.e. probably what is being used to charge the car)
   - Estimated range (based on WLTP)
3. Send Telegram message when charging reaches each 10%
4. Send Telegram message when charging stops
"""
def poll():
# enable sleep in case anything goes wrong below
#
enable_sleep()
# load previous status
#
persistance = load()
# check if we are driving or charging
#
driving = get_driving()
if driving == 1 or driving == -1:
if persistance['charging'] == True:
if persistance['soc'] >= 99:
persistance['soc'] = 100
bot_sendtext('Charging *stopped*. Last known State of charge *'+format(persistance['soc'],'.1f')+'%* ('+format(wltp*persistance['soc']/100, '.1f')+' miles) charged '+format(persistance['cec']-persistance['start_cec'],'.1f')+'kWh')
persistance['charging'] = False
save(persistance)
return {'msg': 'Not charging'}
batt_power = get_charging_power()
# avoid fake charging
#
if batt_power <= 0:
return {'msg': 'Not charging - power less than zero'}
# now we are charging
#
disable_sleep()
soc = get_soc()
cec = get_cec()
# alert if just started to charge
#
if persistance['charging'] == False:
last_charge_odo = persistance['odo']
last_charge_soc = persistance['soc']
odo = get_odometer()
persistance['odo'] = odo
persistance['start_cec'] = cec
if last_charge_soc != soc:
mperkwh = (odo-last_charge_odo)/(last_charge_soc*64.0/100.0-soc*64.0/100.0)
else:
mperkwh = 0.0
co2saved = (ice_emissions*(odo-last_charge_odo)*1.609) - electric_emissions*(last_charge_soc*64.0/100.0-soc*64.0/100.0)
        bot_sendtext('Estimated *'+format(mperkwh,'.2f')+'mi/kWh* since last charge')
bot_sendtext('*'+format(co2saved/1000,'.2f')+'Kg* CO2 saved since last charge')
bot_sendtext('*'+format(odo*ice_emissions/1000000,'.2f')+'tonnes* CO2 saved in total')
bot_sendtext(nearest_charger())
bot_sendtext('Charging *started* at a rate of '+format(batt_power,'.2f')+'kW. State of charge now *'+format(soc,'.1f')+'%* ('+format(wltp*soc/100, '.1f')+' miles)')
    # alert at each 10% increment
#
for level in xrange(0, 100, 10):
if soc >= level and persistance['soc'] < level:
bot_sendtext('Charging *now* at a rate of '+format(batt_power,'.2f')+'kW. State of charge now *'+format(soc,'.1f')+'%* ('+format(wltp*soc/100, '.1f')+' miles)')
break
# store status for next time
#
persistance['charging'] = True
persistance['soc'] = soc
persistance['cec'] = cec
save(persistance)
return {'msg': 'Charging at '+format(batt_power,'.2f')+'kW, SOC now *'+format(soc,'.1f')+'%*'}
# send message to telegram
#
def bot_sendtext(bot_message):
send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?' + urllib.urlencode({'chat_id': bot_chatID, 'parse_mode': 'Markdown', 'text': unicode(bot_message).encode('utf-8')})
requests.get(send_text)
# load persistance
#
def load():
try:
p = pickle.load( open( 'charge_status.p', 'rb' ) )
except:
p = { 'charging': False, 'soc': 0.0, 'odo': 0, 'cec': 0.0, 'start_cec': 0.0 }
return p
# save persistance
#
def save(p):
pickle.dump( p, open( 'charge_status.p', 'wb' ) )
# delete persistance
#
def delete():
os.remove('charge_status.p')
# dump persistance
#
def dump():
return load()
# check if we are driving. Returns :
# 0 - charging
# 1 - driving
# -1 - can't read data
def get_driving():
try:
args = ['driving']
kwargs = {
'mode': '220',
'pid': '101',
'header': '7E4',
'baudrate': 500000,
'formula': 'bytes_to_int(message.data[53:54])',
'protocol': '6',
'verify': False,
'force': True,
}
        # note - sums are done outside of the formula due to autopi failing
# with 0
#
return (int(__salt__['obd.query'](*args, **kwargs)['value'])&4)/4
except:
return -1
# get charging power
#
def get_charging_power():
args = ['charging_power']
kwargs = {
'mode': '220',
'pid': '101',
'header': '7E4',
'baudrate': 500000,
'formula': '(twos_comp(bytes_to_int(message.data[13:14])*256+bytes_to_int(message.data[14:15]),16)/10.0)*((bytes_to_int(message.data[15:16])*256+bytes_to_int(message.data[16:17]))/10.0)/1000.0',
'protocol': '6',
'verify': False,
'force': True,
}
return __salt__['obd.query'](*args, **kwargs)['value']*-1.0
# get display state of charge
#
def get_soc():
args = ['soc']
kwargs = {
'mode': '220',
'pid': '105',
'header': '7E4',
'baudrate': 500000,
'formula': 'bytes_to_int(message.data[34:35])/2.0',
'protocol': '6',
'verify': False,
'force': True,
}
return __salt__['obd.query'](*args, **kwargs)['value']
# get odometer
#
def get_odometer():
args = ['odometer']
kwargs = {
'mode': '22',
'pid': 'B002',
'header': '7C6',
'baudrate': 500000,
'formula': 'bytes_to_int(message.data[11:12])*16777216+bytes_to_int(message.data[12:13])*65536+bytes_to_int(message.data[13:14])*256+bytes_to_int(message.data[14:15])',
'protocol': '6',
'verify': False,
'force': True,
}
return __salt__['obd.query'](*args, **kwargs)['value']
# get Accumulative Charge Power
#
def get_cec():
args = ['odometer']
kwargs = {
'mode': '220',
'pid': '101',
'header': '7E4',
'baudrate': 500000,
'formula': '(bytes_to_int(message.data[41:42])*16777216+bytes_to_int(message.data[42:43])*65536+bytes_to_int(message.data[43:44])*256+bytes_to_int(message.data[44:45]))/10.0',
'protocol': '6',
'verify': False,
'force': True,
}
return __salt__['obd.query'](*args, **kwargs)['value']
# enable autopi sleep
#
def enable_sleep():
args = ['sleep']
kwargs = {
'enable': True,
}
__salt__['power.sleep_timer'](**kwargs)
# disable autopi sleep
#
def disable_sleep():
args = ['sleep']
kwargs = {
'enable': False,
}
__salt__['power.sleep_timer'](**kwargs)
# get location
#
def get_location():
args = []
kwargs = {}
return __salt__['ec2x.gnss_nmea_gga'](*args, **kwargs)
# get nearest charger
#
def nearest_charger():
location = get_location()
for charger in chargers:
lat1 = radians(charger['latitude'])
lon1 = radians(charger['longitude'])
lat2 = radians(location['latitude'])
lon2 = radians(location['longitude'])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
dist = 6373.0 * c
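        # The lines above compute the haversine great-circle distance with an Earth
        # radius of ~6373 km, so `dist` is in kilometres and the 0.02 threshold below
        # means "within roughly 20 m of a known charger".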
if dist < 0.02:
log.info('found local charger '+charger['msg'])
return charger['msg']
log.info('https://api.openchargemap.io/v3/poi/?output=json&distance=0.1&maxresults=1&latitude='+str(location['latitude'])+'&longitude='+str(location['longitude']))
result = requests.get('https://api.openchargemap.io/v3/poi/?output=json&distance=0.1&maxresults=1&latitude='+str(location['latitude'])+'&longitude='+str(location['longitude']))
for i in result.json():
return i['OperatorInfo']['Title']+', '+i['AddressInfo']['Title']+', '+i['UsageCost']
return 'No local charger found'
|
the-stack_106_29289 | import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='textfont',
parent_name='scatterpolargl.selected',
**kwargs
):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Textfont',
data_docs="""
color
Sets the text font color of selected points.
""",
**kwargs
)
|
the-stack_106_29290 | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from train_noff1 import main, create_argparser
import valid_noff1
from multiprocessing import Process
from datetime import datetime
from time import sleep
def hidden_dim_search():
# get default args and manipulate them
args = create_argparser().parse_args([])
args.valid_batches_max = 10
args.save_model = True
args.lambda_srv = 1.0
args.logdir = '/publicwork/adler/melvin/runs'
hidden_dim = [256, 128, 64, 32]
lr = [0.01, 0.02, 0.05, 0.1]
gpu = [0, 1, 2, 3]
proc = []
for h, l, g in zip(hidden_dim, lr, gpu):
args.name = 'h%d_l%d' % (h, int(l*100))
args.lstm_hidden_dim = h
args.snn_hidden_dim = [h, h, h]
args.lr = l
args.gpu = 0 #g
proc.append(Process(target=main, args=(args,)))
proc[-1].start()
[p.join() for p in proc]
def compare_srv_loss():
# get default args and manipulate them
args = create_argparser().parse_args([])
args.valid_batches_max = 10
args.save_model = True
#args.logdir = '/publicwork/adler/melvin/runs'
args.lstm_hidden_dim = 256
args.snn_hidden_dim = [256] * 3
args.epochs = 6
args.lr = 0.01
args.gpu = 0
args.lambda_srv = 1.0
args.srv_loss = 'poisson_binomial'
proc = []
for lambda_srv in [1.0]:
args.name = args.srv_loss + '_%.1f' % lambda_srv
args.lambda_srv = lambda_srv
proc.append(Process(target=main, args=(args,)))
proc[-1].start()
[p.join() for p in proc]
def srvreg():
# get default args and manipulate them
args = create_argparser().parse_args([])
name = datetime.now().strftime('%Y-%m-%dT%H-%M-%S') + '/srvreg_'
args.logdir = '/home/tomte/projects/melvin/runs'
args.lstm_hidden_dim = 256
args.snn_hidden_dim = [256] * 3
args.epochs = 6
args.gpu = 0
srv_loss = ['mse', 'poisson_binomial']
lr = [0.001, 0.017]
task = ['srv', 'srv']
proc = []
for t, s, l in zip(task, srv_loss, lr):
args.name = name + s
args.lr = l
args.srv_loss = s
args.task = t
proc.append(Process(target=main, args=(args,)))
proc[-1].start()
[p.join() for p in proc]
def cross_validation():
name = 'fold%02d'
# training args
args = create_argparser().parse_args([])
args.logdir = '/publicwork/adler/melvin/ccv'
args.lstm_hidden_dim = 1024
args.lr = 0.1
args.batch_size = 512
args.log_interval = 100
args.data = '/local00/bioinf/adler/melvinNoFF1Train.db'
# validation args
valid_args = valid_noff1.create_argparser().parse_args([])
valid_args.batch_size = 4096
valid_args.gpu = 0
valid_args.data = '/local00/bioinf/adler/melvinNoFF1Train.db'
train_job = []
valid_job = []
# start training jobs in parallel on 4 GPUs
for fold in range(11):
args.fold = fold
args.name = name % fold
args.gpu = fold % 4
args.stdout = name % fold + '_train.log'
# start training job
train_job.append(Process(target=main, args=(args,)))
train_job[-1].start()
# wait until initial models are written to file
sleep(15)
# start validation jobs
for fold in range(11):
valid_args.gpu = fold % 4
valid_args.stdout = name % fold + '_valid.log'
valid_args.model_name = os.path.join(args.logdir, name % fold, 'model')
valid_job.append(Process(target=valid_noff1.main, args=(valid_args,)))
valid_job[-1].start()
# wait for training jobs to finish and terminate validation jobs
for t, v in zip(train_job, valid_job):
t.join()
sleep(300) # give validation job another 5 mins to finish
v.terminate()
v.join()
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
t.close()
v.close()
def fold08_grid_search():
#FIXME: get this to work!!!!
name = 'f08gs_h%d_lr%f_bs%d'
# training args
args = create_argparser().parse_args([])
args.logdir = '/publicwork/adler/melvin/grid'
args.embedding_dim = 512
h = [4096, 2048]
lr = [0.1, 0.1]
bs = [128, 128]
args.log_interval = 100
args.data = '/local00/bioinf/adler/melvinNoFF1Train.db'
args.fold = 8
train_job = []
for i, (h_, lr_, bs_) in enumerate(zip(h, lr, bs)):
args.name = name % (h_, lr_, bs_)
args.lstm_hidden_dim = h_
args.lr = lr_
args.batch_size = bs_
args.gpu = i % 4
args.stdout = os.path.join(args.logdir, args.name, 'train.log')
train_job.append(Process(target=main, args=(args,)))
train_job[-1].start()
print('job started')
sleep(30)
# validation args
valid_args = valid_noff1.create_argparser().parse_args([])
valid_args.batch_size = 4096
valid_args.data = '/local00/bioinf/adler/melvinNoFF1Train.db'
valid_job = []
for i, (h_, lr_, bs_) in enumerate(zip(h, lr, bs)):
valid_args.name = name % (h_, lr_, bs_)
valid_args.gpu = i % 4
valid_args.stdout = os.path.join(args.logdir, valid_args.name, 'valid.log')
valid_args.model_name = os.path.join(args.logdir, valid_args.name, 'model')
valid_job.append(Process(target=valid_noff1.main, args=(valid_args,)))
valid_job[-1].start()
# wait for training jobs to finish
for j in train_job:
j.join()
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
j.close()
# give validation jobs another 10 mins
sleep(600)
# terminate validation jobs
for j in valid_job:
j.terminate()
j.join()
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
j.close()
if __name__ == '__main__':
fold08_grid_search()
|
the-stack_106_29291 | # -*- coding: utf-8 -*-
import re
from pathlib import Path
from textwrap import dedent
from unittest.mock import call, patch
import pytest
from jsonschema import ValidationError
from jaffle.config.jaffle_config import JaffleConfig
from jaffle.config.template_string import TemplateString
from jaffle.config.value import ConfigDict
def test_jaffle_config():
conf = JaffleConfig({})
assert conf.namespace == {}
namespace = {'foo': 'FOO', 'bar': 'BAR'}
conf = JaffleConfig(
namespace,
variable={'baz': 'BAZ'},
kernel={'my_kernel': {
'kernel_name': 'python3'
}},
app={
'my_app': {
'class': 'my.app.MyApp',
'logger': {
'suppress_regex': ['pat1 ${foo}', 'pat2'],
'replace_regex': [{
'from': 'pat_from ${foo}',
'to': 'pat_to ${bar}'
}]
}
}
},
process={'my_proc': {
'command': 'my_proc'
}},
job={'my_job': {
'command': 'my_job'
}},
logger={
'level': 'debug',
'suppress_regex': ['global_pat1', 'global_pat2 ${foo}'],
'replace_regex': [{
'from': 'global_pat_from ${bar}',
'to': 'global_pat_to ${foo}'
}]
}
)
assert conf.namespace == namespace
assert conf.variable == ConfigDict({'baz': 'BAZ'}, namespace)
assert conf.kernel == ConfigDict({'my_kernel': {'kernel_name': 'python3'}}, namespace)
assert conf.app == ConfigDict({
'my_app': {
'class': 'my.app.MyApp',
'logger': {
'suppress_regex': ['pat1 ${foo}', 'pat2'],
'replace_regex': [{
'from': 'pat_from ${foo}',
'to': 'pat_to ${bar}'
}]
}
}
}, namespace)
assert conf.process == ConfigDict({'my_proc': {'command': 'my_proc'}}, namespace)
assert conf.job == ConfigDict({'my_job': {'command': 'my_job'}}, namespace)
assert conf.logger == ConfigDict({
'level': 'debug',
'suppress_regex': ['global_pat1', 'global_pat2 ${foo}'],
'replace_regex': [{
'from': 'global_pat_from ${bar}',
'to': 'global_pat_to ${foo}'
}]
}, namespace)
assert conf.app_log_suppress_patterns == {
'my_app': [re.compile('pat1 FOO'), re.compile('pat2')]
}
assert conf.app_log_replace_patterns == {
'my_app': [(re.compile('pat_from FOO'), 'pat_to ${bar}')]
}
pat_to = conf.app_log_replace_patterns['my_app'][0][1]
assert isinstance(pat_to, TemplateString)
assert pat_to.render() == 'pat_to BAR'
assert conf.global_log_suppress_patterns == [
re.compile('global_pat1'), re.compile('global_pat2 FOO')
]
assert conf.global_log_replace_patterns == [
(re.compile('global_pat_from BAR'), 'global_pat_to ${foo}')
]
pat_to = conf.global_log_replace_patterns[0][1]
assert isinstance(pat_to, TemplateString)
assert pat_to.render() == 'global_pat_to FOO'
def test_load():
data1 = {'kernel': {'my_kernel': {'kernel_name': 'python3', 'pass_env': []}}}
ns = {'HOME': '/home/foo'}
variables = {'name': 'foo'}
with patch.object(JaffleConfig, '_load_file', return_value=data1) as load_file:
with patch.object(JaffleConfig, 'create') as create:
config = JaffleConfig.load(['jaffle.hcl'], ns, variables)
assert config is create.return_value
load_file.assert_called_once_with('jaffle.hcl')
create.assert_called_once_with(data1, ns, variables)
data2 = {
'kernel': {
'my_kernel': {
'kernel_name': 'pyspark',
'pass_env': ['HOME']
}
},
'app': {
'my_app': True
}
}
with patch.object(JaffleConfig, '_load_file', side_effect=[data1, data2]) as load_file:
with patch.object(JaffleConfig, 'create') as create:
config = JaffleConfig.load(['jaffle.hcl', 'my_jaffle.hcl'], ns, variables)
assert config is create.return_value
load_file.assert_has_calls([call('jaffle.hcl'), call('my_jaffle.hcl')])
create.assert_called_once_with({
'kernel': {
'my_kernel': {
'kernel_name': 'pyspark',
'pass_env': ['HOME']
}
},
'app': {
'my_app': True
}
}, ns, variables)
data1 = {'kernel': {'my_kernel': {'kernel_name': 'python3', 'invalid_param': True}}}
with patch.object(JaffleConfig, '_load_file', return_value=data1) as load_file:
with patch.object(JaffleConfig, 'create') as create:
with pytest.raises(ValidationError) as e:
JaffleConfig.load(['jaffle.hcl'], {}, {})
assert "'invalid_param' was unexpected" in str(e)
def test_create():
def echo(msg):
return msg
functions = [echo]
with patch('jaffle.config.jaffle_config.functions', functions):
with patch('jaffle.config.jaffle_config.VariablesNamespace') as vn:
with patch.object(JaffleConfig, '__init__', return_value=None) as init:
JaffleConfig.create({
'kernel': {
'my_kernel': {
'kernel_name': 'python3'
}
}
}, {'HOME': '/home/foo'}, {'foo': True})
init.assert_called_once_with({
'HOME': '/home/foo',
'var': vn.return_value,
'echo': echo,
}, kernel={'my_kernel': {'kernel_name': 'python3'}}) # yapf: disable
def test_load_file(tmpdir):
tmp_file = Path(str(tmpdir)) / 'jaffle.hcl'
with tmp_file.open('w') as f:
f.write(
dedent(
'''
kernel "my_kernel" {
kernel_name = "python3"
}
'''
).strip() # yapf: disable
)
data = JaffleConfig._load_file(tmp_file)
assert data == {'kernel': {'my_kernel': {'kernel_name': 'python3'}}}
data = JaffleConfig._load_file(str(tmp_file))
assert data == {'kernel': {'my_kernel': {'kernel_name': 'python3'}}}
|
the-stack_106_29292 | # 🚩 Dada Ki Jay Ho 🚩
import os
import webbrowser
from Resources.UsedForBoth.text_to_speech import sayAndWait
path = ""
def open_folder(cmd:str):
global path
if "open" in cmd and ("folder" in cmd or "drive" in cmd):
if "drive" in cmd:
cmd = cmd.replace("drive", "")
            drive_name = cmd[5:].strip()  # drop the leading "open " to keep only the drive letter
path = ""
path += drive_name + ":/"
if os.path.isdir(path):
webbrowser.open(path)
else:
sayAndWait("No such drive is available")
else:
cmd = cmd.replace("folder", "")
folder_name = cmd[5:].strip()
path += folder_name + "/"
if os.path.isdir(path):
webbrowser.open(path)
else:
sayAndWait("No such folder is available")
print(path)
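
# Example (hypothetical voice/typed commands):
#   open_folder("open d drive")          -> opens D:/ in the default file browser
#   open_folder("open projects folder")  -> opens <current drive>/projects/, using the
#                                           module-level `path` built up by earlier calls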
|
the-stack_106_29294 | # -*- coding: utf-8 -*-
from simmate.workflow_engine import s3task_to_workflow
from simmate.calculators.vasp.tasks.relaxation import (
Quality01Relaxation as Quality01RelaxationTask,
)
from simmate.calculators.vasp.database.relaxation import (
Quality01Relaxation as Quality01RelaxationResults,
)
workflow = s3task_to_workflow(
name="relaxation/quality01",
module=__name__,
project_name="Simmate-Relaxation",
s3task=Quality01RelaxationTask,
calculation_table=Quality01RelaxationResults,
register_kwargs=["prefect_flow_run_id", "structure", "source"],
)
|
the-stack_106_29295 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.framework as framework
from paddle.fluid.backward import append_backward
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, program_guard
from simple_nets import simple_fc_net_with_inputs, batchnorm_fc_with_inputs
np.random.seed(123)
class TestCondInputOutput(unittest.TestCase):
def test_return_single_var(self):
"""
pseudocode:
if 0.23 < 0.1:
return 2
else:
return -1
"""
def true_func():
return layers.fill_constant(shape=[2, 3], dtype='int32', value=2)
def false_func():
return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = layers.less_than(y, x)
out = layers.cond(pred, true_func, false_func)
# out is one tensor
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=[out.name])
self.assertTrue(
np.allclose(np.asarray(ret), np.full((3, 2), -1, np.int32)))
def test_return_var_tuple(self):
"""
pseudocode:
if True:
return 1, True
else:
return 3, 2
"""
def true_func():
return layers.fill_constant(
shape=[1, 2], dtype='int32', value=1), layers.fill_constant(
shape=[2, 3], dtype='bool', value=True)
def false_func():
return layers.fill_constant(
shape=[3, 4], dtype='float32', value=3), layers.fill_constant(
shape=[4, 5], dtype='int64', value=2)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
pred = layers.fill_constant(shape=[1], dtype='bool', value=True)
out = layers.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=out)
self.assertTrue(
np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))
self.assertTrue(
np.allclose(np.asarray(ret[1]), np.full((2, 3), True, np.bool)))
def test_pass_and_modify_var(self):
"""
pseudocode:
for i in range(5):
a = 7
if i % 2 == 0:
a = a * (i + 1)
else:
a = a - (i - 1)
"""
def true_func(a, i):
a = a * (i + 1)
return a
def false_func(a, i):
a = a - (i - 1)
return a
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)
i = fluid.data(name="i", shape=[1], dtype='int32')
pred = ((i % 2) == 0)
a = layers.cond(pred, lambda: true_func(a, i),
lambda: false_func(a, i))
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
for feed_i in range(5):
expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i
ret = exe.run(main_program,
feed={'i': np.full((1), feed_i, np.int32)},
fetch_list=[a])
self.assertTrue(
np.allclose(
np.asarray(ret), np.full((3, 2, 1), expected_a, np.int32)))
def test_return_none(self):
"""
pseudocode: test doing nothing in branches
for i in range(5):
if i % 2 == 0:
pass
else:
pass
"""
def true_func():
pass
def false_func():
return None
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = fluid.data(name="i", shape=[1], dtype='int32')
pred = ((i % 2) == 0)
out1 = layers.cond(pred, true_func, false_func)
out2 = layers.cond(pred, None, false_func)
out3 = layers.cond(pred, true_func, None)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
for feed_i in range(5):
            # Test that a program whose cond outputs are None is runnable
exe.run(main_program, feed={'i': np.full((1), feed_i, np.int32)})
self.assertIsNone(out1)
self.assertIsNone(out2)
self.assertIsNone(out3)
def test_wrong_structure_exception(self):
"""
test returning different number of tensors cannot merge into output
"""
def func_return_none():
return None
def func_return_one_tensor():
return layers.fill_constant(shape=[2, 7], dtype='int32', value=3)
def func_return_two_tensors():
return layers.fill_constant(
shape=[3, 1], dtype='int32', value=7), layers.fill_constant(
shape=[3, 1], dtype='int32', value=8)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = fluid.data(name="i", shape=[1], dtype='int32')
pred = ((i % 2) == 0)
with self.assertRaises(Exception) as e:
out = layers.cond(pred, i, func_return_one_tensor)
self.assertEqual("The true_fn in cond must be callable",
str(e.exception))
with self.assertRaises(Exception) as e:
out = layers.cond(pred, func_return_one_tensor, np.asarray([3]))
self.assertEqual("The false_fn in cond must be callable",
str(e.exception))
with self.assertRaises(Exception) as e:
out = layers.cond(pred, func_return_none,
func_return_one_tensor)
self.assertTrue(
"Incompatible return values of true_fn and false_fn in cond" in
str(e.exception))
with self.assertRaises(Exception) as e:
out = layers.cond(pred, func_return_two_tensors,
func_return_none)
self.assertTrue(
"Incompatible return values of true_fn and false_fn in cond" in
str(e.exception))
with self.assertRaises(Exception) as e:
out = layers.cond(pred, func_return_one_tensor,
func_return_two_tensors)
self.assertTrue(
"Incompatible return values of true_fn and false_fn in cond" in
str(e.exception))
def test_extremely_simple_net_with_op_in_condition(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
a = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.23)
a.stop_gradient = False
b = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.25)
b.stop_gradient = False
out = layers.cond(a - b < -1.0, lambda: a, lambda: b)
append_backward(out)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=[out, a.grad_name, b.grad_name])
        # Note: fill_constant has loss of precision, so only assertEqual
        # with values that don't lose precision in floating-point representation.
self.assertEqual(ret[0][0], 1.25)
self.assertEqual(ret[1][0], 0.0)
self.assertEqual(ret[2][0], 1.0)
class TestCondNestedControlFlow(unittest.TestCase):
def test_cond_inside_cond(self):
"""
pseudocode:
for i in range(1, 10):
a = 2 * i
if i < 5:
if i >= 3:
return a + a
else:
return a - a
else:
if i < 8:
return a * a
else:
return a / a
"""
def less_than_branch(i, a):
return layers.cond(i >= 3.0, lambda: layers.elementwise_add(a, a),
lambda: layers.elementwise_sub(a, a))
def greater_equal_branch(i, a):
return layers.cond(i < 8.0, lambda: layers.elementwise_mul(a, a),
lambda: layers.elementwise_div(a, a))
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = fluid.data(name="i", shape=[1], dtype='float32')
a = 2.0 * i
out = layers.cond(i < 5.0, lambda: less_than_branch(i, a),
lambda: greater_equal_branch(i, a))
mean = layers.mean(out)
append_backward(mean)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
for feed_i in range(0, 10):
expected_a = 2.0 * feed_i
if feed_i < 5:
expected_ret = expected_a + expected_a if feed_i >= 3 else 0.0
expected_a_grad = 2.0 if feed_i >= 3 else 0.0
else:
expected_ret = expected_a * expected_a if feed_i < 8 else 1.0
expected_a_grad = 2.0 * expected_a if feed_i < 8 else 0.0
ret = exe.run(main_program,
feed={'i': np.full((1), feed_i, np.float32)},
fetch_list=[out.name, a.grad_name])
self.assertEqual(ret[0][0], expected_ret)
self.assertEqual(ret[1][0], expected_a_grad)
def test_cond_op_in_condition(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
a = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.23)
a.stop_gradient = False
b = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.24)
b.stop_gradient = False
out = fluid.layers.cond(
a < b,
lambda: fluid.layers.cond(a - b < -1.0, lambda: fluid.layers.elementwise_add(a, b), lambda: fluid.layers.elementwise_mul(a, b)),
lambda: fluid.layers.cond(a == b, lambda: fluid.layers.elementwise_sub(a, b), lambda: fluid.layers.elementwise_pow(a, b))
)
append_backward(out)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=[out, a.grad_name, b.grad_name])
# Note: fill_constant has loss of precision, so we assertAlmostEqual.
self.assertAlmostEqual(ret[0][0], 1.5252)
self.assertAlmostEqual(ret[1][0], 1.24)
self.assertAlmostEqual(ret[2][0], 1.23)
class TestCondBackward(unittest.TestCase):
def backward_value_helper(self, cond_func):
"""
Helper function that compares calculated backward value is close to dy/dx
"""
main_program = Program()
main_program.random_seed = 123
startup_program = Program()
startup_program.random_seed = 123
with program_guard(main_program, startup_program):
img = fluid.data(name='image', shape=[-1, 9], dtype='float32')
img.stop_gradient = False
label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
i = fluid.data(name="i", shape=[1], dtype='int32')
loss = cond_func(i, img, label)
append_backward(loss)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
delta = 0.005
for feed_i in range(0, 10):
feed_img = np.random.random(size=[1, 9]).astype(np.float32)
feed_label = np.random.randint(
low=0, high=10, size=[1, 1], dtype=np.int64)
img_grad, loss_value = exe.run(
main_program,
feed={
'i': np.full((1), feed_i, np.int32),
'image': feed_img,
'label': feed_label
},
fetch_list=[img.grad_name, loss.name])
numerical_grad = np.zeros(shape=[1, 9], dtype=np.float32)
feed_img_delta = np.copy(feed_img)
for j in range(9):
feed_img_delta[0][j] = feed_img[0][j] + delta
loss_delta = exe.run(main_program,
feed={
'i': np.full((1), feed_i, np.int32),
'image': feed_img_delta,
'label': feed_label
},
fetch_list=[loss.name])
numerical_grad[0][j] = (loss_delta[0] - loss_value[0]) / delta
feed_img_delta[0][j] = feed_img[0][j]
self.assertTrue(
np.isclose(
img_grad, numerical_grad, atol=0.05, rtol=0.05).all())
def add_optimizer_helper(self, cond_func):
"""
Test that program is runnable when add optimizer
"""
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
img = fluid.data(name='image', shape=[-1, 784], dtype='float32')
label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
i = fluid.data(name="i", shape=[1], dtype='int32')
loss = cond_func(i, img, label)
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(loss)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
for feed_i in range(0, 10):
feed_img = np.random.random(size=[16, 784]).astype(np.float32)
feed_label = np.random.randint(
low=0, high=10, size=[16, 1], dtype=np.int64)
exe.run(main_program,
feed={
'i': np.full((1), feed_i, np.int32),
'image': feed_img,
'label': feed_label
},
fetch_list=[loss])
def test_cond_backward(self):
def cond_func(i, img, label):
predicate = ((i % 2) == 0)
return layers.cond(predicate,
lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10))
self.backward_value_helper(cond_func)
self.add_optimizer_helper(cond_func)
def test_half_nested_cond_backward(self):
def branch(i, img, label):
return layers.cond((i % 2) == 0, lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10))
def cond_func_simple_net_at_true(i, img, label):
return layers.cond(i < 5, lambda: branch(i, img, label),
lambda: layers.mean(img))
def cond_func_simple_net_at_false(i, img, label):
return layers.cond(i < 5, lambda: layers.mean(img),
lambda: branch(i, img, label))
self.backward_value_helper(cond_func_simple_net_at_true)
self.add_optimizer_helper(cond_func_simple_net_at_true)
self.backward_value_helper(cond_func_simple_net_at_false)
self.add_optimizer_helper(cond_func_simple_net_at_false)
def test_nested_cond_backward(self):
def branch(i, img, label, mod_two):
if mod_two:
predicate = ((i % 2) == 0)
else:
predicate = ((i % 2) != 0)
return layers.cond(predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10))
def cond_func(i, img, label):
return layers.cond(i < 5, lambda: branch(i, img, label, True),
lambda: branch(i, img, label, False))
self.backward_value_helper(cond_func)
self.add_optimizer_helper(cond_func)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_29296 | from __future__ import unicode_literals
import os
import shlex
import subprocess
import sys
import syslog
import time
from traceback import format_exception
from cyder.base.mixins import MutexMixin
from cyder.base.utils import (copy_tree, dict_merge, log, run_command,
set_attrs, shell_out)
from cyder.base.vcs import GitRepo
from cyder.core.utils import fail_mail
from cyder.core.ctnr.models import Ctnr
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.vrf.models import Vrf
from cyder.cydhcp.workgroup.models import Workgroup
from cyder.settings import DHCPBUILD
class DHCPBuilder(MutexMixin):
def __init__(self, *args, **kwargs):
kwargs = dict_merge(DHCPBUILD, {
'verbose': True,
'debug': False,
}, kwargs)
set_attrs(self, kwargs)
if self.log_syslog:
syslog.openlog(b'dhcp_build', 0, syslog.LOG_LOCAL6)
self.repo = GitRepo(self.prod_dir, self.line_change_limit,
self.line_removal_limit, debug=self.debug,
log_syslog=self.log_syslog, logger=syslog)
def log_debug(self, msg, to_stderr=None):
if to_stderr is None:
to_stderr = self.debug
log(msg, log_level='LOG_DEBUG', to_syslog=False, to_stderr=to_stderr,
logger=syslog)
def log_info(self, msg, to_stderr=None):
if to_stderr is None:
to_stderr = self.verbose
log(msg, log_level='LOG_INFO', to_syslog=self.log_syslog,
to_stderr=to_stderr, logger=syslog)
def log_notice(self, msg, to_stderr=None):
if to_stderr is None:
to_stderr = self.verbose
log(msg, log_level='LOG_NOTICE', to_syslog=self.log_syslog,
to_stderr=to_stderr, logger=syslog)
def log_err(self, msg, to_stderr=True):
log(msg, log_level='LOG_ERR', to_syslog=self.log_syslog,
to_stderr=to_stderr, logger=syslog)
def run_command(self, command, log=True, failure_msg=None):
if log:
command_logger = self.log_debug
failure_logger = self.log_err
else:
command_logger = None
failure_logger = None
return run_command(command, command_logger=command_logger,
failure_logger=failure_logger,
failure_msg=failure_msg)
def build(self):
try:
with open(self.stop_file) as stop_fd:
now = time.time()
contents = stop_fd.read()
last = os.path.getmtime(self.stop_file)
msg = ('The stop file ({0}) exists. Build canceled.\n'
'Reason for skipped build:\n'
'{1}'.format(self.stop_file, contents))
self.log_notice(msg, to_stderr=False)
if (self.stop_file_email_interval is not None and
now - last > self.stop_file_email_interval):
os.utime(self.stop_file, (now, now))
fail_mail(msg, subject="DHCP builds have stopped")
raise Exception(msg)
except IOError as e:
if e.errno == 2: # IOError: [Errno 2] No such file or directory
pass
else:
raise
self.log_info('Building...')
try:
with open(os.path.join(self.stage_dir, self.target_file), 'w') \
as f:
for ctnr in Ctnr.objects.all():
f.write(ctnr.build_legacy_classes())
for vrf in Vrf.objects.all():
f.write(vrf.build_vrf())
for network in Network.objects.filter(enabled=True):
f.write(network.build_subnet())
for workgroup in Workgroup.objects.all():
f.write(workgroup.build_workgroup())
except:
self.error()
if self.check_file:
self.check_syntax()
self.log_info('DHCP build successful')
def push(self, sanity_check=True):
self.repo.reset_and_pull()
try:
copy_tree(self.stage_dir, self.prod_dir)
except:
self.repo.reset_to_head()
raise
self.repo.commit_and_push('Update config', sanity_check=sanity_check)
def error(self):
ei = sys.exc_info()
exc_msg = ''.join(format_exception(*ei)).rstrip('\n')
self.log_err(
'DHCP build failed.\nOriginal exception: ' + exc_msg,
to_stderr=False)
raise
def check_syntax(self):
out, err, ret = run_command("{0} -t -cf {1}".format(
self.dhcpd, os.path.join(self.stage_dir, self.check_file)
))
if ret != 0:
log_msg = 'DHCP build failed due to a syntax error'
exception_msg = log_msg + ('\n{0} said:\n{1}'
.format(self.dhcpd, err))
self.log_err(log_msg, to_stderr=False)
raise Exception(exception_msg)
def _lock_failure(self, pid):
self.log_err(
'Failed to acquire lock on {0}. Process {1} currently '
'has it.'.format(self.lock_file, pid),
to_stderr=False)
fail_mail(
'An attempt was made to start the DHCP build script while an '
'instance of the script was already running. The attempt was '
'denied.',
subject="Concurrent DHCP builds attempted.")
super(DHCPBuilder, self)._lock_failure(pid)
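
# Hypothetical driver sketch (in practice this is presumably invoked from a build
# command; the exact settings come from the DHCPBUILD dict merged in __init__):
#
#   builder = DHCPBuilder(verbose=True)
#   builder.build()   # render ctnrs, vrfs, networks and workgroups into stage_dir
#   builder.push()    # copy stage_dir to prod_dir and commit/push via GitRepo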
|
the-stack_106_29297 | from flask import Flask, render_template, request
import jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
model = pickle.load(open('random_forest_regression_model.pkl', 'rb'))
@app.route('/',methods=['GET'])
def Home():
return render_template('index.html')
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
Fuel_Type_Diesel=0
if request.method == 'POST':
Year = int(request.form['Year'])
Present_Price=float(request.form['Present_Price'])
Kms_Driven=int(request.form['Kms_Driven'])
Kms_Driven2=np.log(Kms_Driven)
Owner=int(request.form['Owner'])
Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
if(Fuel_Type_Petrol=='Petrol'):
Fuel_Type_Petrol=1
Fuel_Type_Diesel=0
else:
Fuel_Type_Petrol=0
Fuel_Type_Diesel=1
Year=2020-Year
Seller_Type_Individual=request.form['Seller_Type_Individual']
if(Seller_Type_Individual=='Individual'):
Seller_Type_Individual=1
else:
Seller_Type_Individual=0
Transmission_Mannual=request.form['Transmission_Mannual']
if(Transmission_Mannual=='Mannual'):
Transmission_Mannual=1
else:
Transmission_Mannual=0
prediction=model.predict([[Present_Price,Kms_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
output=round(prediction[0],2)
if output<0:
return render_template('index.html',prediction_texts="Sorry you can't sell this car")
else:
return render_template('index.html',prediction_text="You Can Sell The Car at {}".format(output))
else:
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True)
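
# Note: the pickled model is assumed to expect its 8 features in exactly the order
# used in the predict() call above:
#   [Present_Price, log(Kms_Driven), Owner, car_age_in_years,
#    Fuel_Type_Diesel, Fuel_Type_Petrol, Seller_Type_Individual, Transmission_Mannual]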
|
the-stack_106_29298 | # encoding=utf8
# pylint: disable=anomalous-backslash-in-string
import math
__all__ = ['Whitley']
class Whitley(object):
r"""Implementation of Whitley function.
Date: 2018
Authors: Grega Vrbančič and Lucija Brezočnik
License: MIT
Function: **Whitley function**
:math:`f(\mathbf{x}) = \sum_{i=1}^D \sum_{j=1}^D
\left(\frac{(100(x_i^2-x_j)^2 + (1-x_j)^2)^2}{4000} -
\cos(100(x_i^2-x_j)^2 + (1-x_j)^2)+1\right)`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-10.24, 10.24]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (1,...,1)`
LaTeX formats:
Inline:
$f(\mathbf{x}) =
\sum_{i=1}^D \sum_{j=1}^D \left(\frac{(100(x_i^2-x_j)^2 +
(1-x_j)^2)^2}{4000} - \cos(100(x_i^2-x_j)^2 + (1-x_j)^2)+1\right)$
Equation:
\begin{equation}f(\mathbf{x}) =
\sum_{i=1}^D \sum_{j=1}^D \left(\frac{(100(x_i^2-x_j)^2 +
(1-x_j)^2)^2}{4000} - \cos(100(x_i^2-x_j)^2 +
(1-x_j)^2)+1\right) \end{equation}
Domain:
$-10.24 \leq x_i \leq 10.24$
Reference paper:
Jamil, M., and Yang, X. S. (2013).
A literature survey of benchmark functions for global optimisation problems.
International Journal of Mathematical Modelling and Numerical Optimisation,
4(2), 150-194.
"""
def __init__(self, Lower=-10.24, Upper=10.24):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
for j in range(D):
temp = 100 * \
math.pow((math.pow(sol[i], 2) - sol[j]), 2) + math.pow(
1 - sol[j], 2)
val += (float(math.pow(temp, 2)) / 4000.0) - \
math.cos(temp) + 1
return val
return evaluate
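# Hedged usage sketch (added illustration, not part of the original module):
# at the documented global minimum x* = (1, ..., 1) every inner term is
# (0 ** 2) / 4000 - cos(0) + 1 = 0, so the benchmark evaluates to exactly 0.0.
if __name__ == '__main__':
    whitley = Whitley()
    evaluate = Whitley.function()
    print(whitley.Lower, whitley.Upper)  # search bounds: -10.24 10.24
    print(evaluate(5, [1.0] * 5))        # expected: 0.0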
|
the-stack_106_29299 | import os
def migratoryBirds(arr):
count = [0, 0, 0, 0, 0]
for i in range(len(arr)):
if arr[i] == 1:
count[0] += 1
if arr[i] == 2:
count[1] += 1
if arr[i] == 3:
count[2] += 1
if arr[i] == 4:
count[3] += 1
if arr[i] == 5:
count[4] += 1
return count.index(max(count))+1
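# Added illustration (not part of the original submission):
# migratoryBirds([1, 4, 4, 4, 5, 3]) -> 4, since type 4 is sighted three times.
# On a tie, count.index(max(count)) returns the first maximum, i.e. the lowest
# type id, which matches the problem's tie-breaking rule.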
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = migratoryBirds(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
the-stack_106_29301 | ########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import glob
import os
import shutil
import errno
from datetime import datetime
import unicodedata
from retrying import retry
from cloudify.decorators import workflow
from cloudify.deployment_dependencies import create_deployment_dependency
from cloudify.manager import get_rest_client
from cloudify.workflows import workflow_context
from cloudify.utils import parse_utc_datetime_relative
from cloudify_rest_client.client import CloudifyClient
from cloudify_rest_client.exceptions import CloudifyClientError
from dsl_parser import constants as dsl
from dsl_parser import tasks
def _get_display_name(display_name, settings):
display_name = display_name or settings.get('display_name')
if not display_name:
return
if len(display_name) > 256:
raise ValueError(
'The deployment display name is too long. '
'Maximum allowed length is 256 characters'
)
if any(unicodedata.category(char)[0] == 'C' for char in display_name):
raise ValueError(
'The deployment display name contains illegal characters. '
'Control characters are not allowed'
)
return unicodedata.normalize('NFKC', display_name)
def _parse_plan_datetime(time_expression, base_datetime):
"""
:param time_expression: Either a string representing an absolute
datetime, or a relative time delta, such as '+4 hours' or '+1y+1d'.
:param base_datetime: a datetime object representing the absolute date
and time to which we apply the time delta.
:return: A naive datetime object, in UTC time.
"""
time_fmt = '%Y-%m-%d %H:%M:%S'
if time_expression.startswith('+'):
return parse_utc_datetime_relative(time_expression, base_datetime)
return datetime.strptime(time_expression, time_fmt)
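# Added illustration (the relative-delta grammar is implemented by
# cloudify.utils.parse_utc_datetime_relative and only assumed here):
#   _parse_plan_datetime('2021-06-01 12:00:00', base) -> datetime(2021, 6, 1, 12, 0)
#   _parse_plan_datetime('+4 hours', base)            -> base shifted 4 hours forward (naive UTC)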
def _create_schedules(client, deployment_id, schedules):
base_time = datetime.utcnow()
for name, spec in schedules.items():
workflow_id = spec.pop('workflow')
if 'since' in spec:
spec['since'] = _parse_plan_datetime(spec['since'], base_time)
if 'until' in spec:
spec['until'] = _parse_plan_datetime(spec['until'], base_time)
if 'workflow_parameters' in spec:
spec['parameters'] = spec.pop('workflow_parameters')
client.execution_schedules.create(
name,
deployment_id=deployment_id,
workflow_id=workflow_id,
**spec
)
def _join_groups(client, deployment_id, groups):
for group_name in groups:
try:
client.deployment_groups.add_deployments(
group_name, deployment_ids=[deployment_id])
except CloudifyClientError as e:
if e.status_code != 404:
raise
client.deployment_groups.put(
group_name, deployment_ids=[deployment_id])
def _get_deployment_labels(new_labels, plan_labels):
labels = {tuple(label) for label in new_labels}
for name, label_spec in plan_labels.items():
labels |= {(name.lower(), value) for value in
label_spec.get('values', [])}
return [{k: v} for k, v in labels]
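# Added illustration: user labels and plan labels are merged and de-duplicated
# through a set of (key, value) tuples, e.g.
#   _get_deployment_labels([['env', 'dev']], {'Team': {'values': ['qa']}})
#   -> [{'env': 'dev'}, {'team': 'qa'}]  (plan label keys lowercased, order arbitrary)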
@workflow
def create(ctx, labels=None, inputs=None, skip_plugins_validation=False,
display_name=None, **_):
client = get_rest_client(tenant=ctx.tenant_name)
bp = client.blueprints.get(ctx.blueprint.id)
deployment_plan = tasks.prepare_deployment_plan(
bp.plan, client.secrets.get, inputs,
runtime_only_evaluation=ctx.deployment.runtime_only_evaluation)
nodes = deployment_plan['nodes']
node_instances = deployment_plan['node_instances']
labels_to_create = _get_deployment_labels(
labels or [],
deployment_plan.get('labels', {}))
ctx.logger.info('Setting deployment attributes')
client.deployments.set_attributes(
ctx.deployment.id,
description=deployment_plan['description'],
workflows=deployment_plan['workflows'],
inputs=deployment_plan['inputs'],
policy_types=deployment_plan['policy_types'],
policy_triggers=deployment_plan['policy_triggers'],
groups=deployment_plan['groups'],
scaling_groups=deployment_plan['scaling_groups'],
outputs=deployment_plan['outputs'],
capabilities=deployment_plan.get('capabilities', {}),
labels=labels_to_create,
)
ctx.logger.info('Creating %d nodes', len(nodes))
client.nodes.create_many(ctx.deployment.id, nodes)
ctx.logger.info('Creating %d node-instances', len(node_instances))
client.node_instances.create_many(ctx.deployment.id, node_instances)
# deployment_settings can depend on labels etc, so we must evaluate
# functions in it after setting labels
deployment_settings = client.evaluate.functions(
ctx.deployment.id,
{},
deployment_plan.get('deployment_settings', {}),
)['payload']
display_name = _get_display_name(display_name, deployment_settings)
if display_name:
client.deployments.set_attributes(
ctx.deployment.id, display_name=display_name)
_join_groups(client, ctx.deployment.id,
deployment_settings.get('default_groups', []))
_create_schedules(client, ctx.deployment.id,
deployment_settings.get('default_schedules', {}))
ctx.logger.info('Creating deployment work directory')
_create_deployment_workdir(
deployment_id=ctx.deployment.id,
tenant=ctx.tenant_name,
logger=ctx.logger)
new_dependencies = deployment_plan.setdefault(
dsl.INTER_DEPLOYMENT_FUNCTIONS, {})
if new_dependencies:
ctx.logger.info('Creating inter-deployment dependencies')
manager_ips = [manager.private_ip
for manager in client.manager.get_managers()]
ext_client, client_config, ext_deployment_id = \
_get_external_clients(nodes, manager_ips)
local_tenant_name = ctx.tenant_name if ext_client else None
local_idds, external_idds = _create_inter_deployment_dependencies(
            manager_ips,
client_config, new_dependencies, ctx.deployment.id,
local_tenant_name, bool(ext_client), ext_deployment_id)
if local_idds:
client.inter_deployment_dependencies.create_many(
ctx.deployment.id,
local_idds)
if external_idds:
ext_client.inter_deployment_dependencies.create_many(
ctx.deployment.id,
external_idds)
@workflow
def delete(ctx, delete_logs, **_):
ctx.logger.info('Deleting deployment environment: %s', ctx.deployment.id)
_delete_deployment_workdir(ctx)
if delete_logs:
ctx.logger.info("Deleting management workers' logs for deployment %s",
ctx.deployment.id)
_delete_logs(ctx)
def _delete_logs(ctx):
log_dir = os.environ.get('AGENT_LOG_DIR')
if log_dir:
log_file_path = os.path.join(log_dir, 'logs',
'{0}.log'.format(ctx.deployment.id))
if os.path.exists(log_file_path):
try:
with open(log_file_path, 'w') as f:
# Truncating instead of deleting because the logging
# server currently holds a file descriptor open to this
# file. If we delete the file, the logs for new
# deployments that get created with the same deployment
# id, will get written to a stale file descriptor and
# will essentially be lost.
f.truncate()
except IOError:
ctx.logger.warn(
'Failed truncating {0}.'.format(log_file_path),
exc_info=True)
for rotated_log_file_path in glob.glob('{0}.*'.format(
log_file_path)):
try:
os.remove(rotated_log_file_path)
except IOError:
ctx.logger.exception(
'Failed removing rotated log file {0}.'.format(
rotated_log_file_path), exc_info=True)
def _retry_if_file_already_exists(exception):
"""Retry if file already exist exception raised."""
return (
isinstance(exception, OSError) and
exception.errno == errno.EEXIST
)
@workflow_context.task_config(send_task_events=False)
@retry(retry_on_exception=_retry_if_file_already_exists,
stop_max_delay=60000,
wait_fixed=2000)
def _create_deployment_workdir(deployment_id, logger, tenant):
deployment_workdir = _workdir(deployment_id, tenant)
if os.path.exists(deployment_workdir):
# Otherwise we experience pain on snapshot restore
return
os.makedirs(deployment_workdir)
def _delete_deployment_workdir(ctx):
deployment_workdir = _workdir(ctx.deployment.id, ctx.tenant_name)
if not os.path.exists(deployment_workdir):
return
try:
shutil.rmtree(deployment_workdir)
except os.error:
ctx.logger.warning(
'Failed deleting directory %s. Current directory content: %s',
deployment_workdir, os.listdir(deployment_workdir), exc_info=True)
def _workdir(deployment_id, tenant):
return os.path.join('/opt', 'manager', 'resources', 'deployments',
tenant, deployment_id)
def _create_inter_deployment_dependencies(manager_ips: list,
client_config,
new_dependencies: dict,
local_deployment_id: str,
local_tenant_name: str,
external: bool,
ext_deployment_id: str) -> tuple:
local_idds = []
external_idds = []
for func_id, deployment_id_func in new_dependencies.items():
target_deployment_id, target_deployment_func = deployment_id_func
if external:
local_idds += [
create_deployment_dependency(
dependency_creator=func_id,
target_deployment=None,
external_target={
'deployment': (ext_deployment_id
if ext_deployment_id else None),
'client_config': client_config
})]
external_idds += [
create_deployment_dependency(
dependency_creator=func_id,
target_deployment=(target_deployment_id
if target_deployment_id else ' '),
external_source={
'deployment': local_deployment_id,
'tenant': local_tenant_name,
'host': manager_ips,
})]
else:
# It should be safe to assume that if the target_deployment
# is known, there's no point passing target_deployment_func.
# Also because in this case the target_deployment_func will
# be of type string, while REST endpoint expects a dict.
local_idds += [
create_deployment_dependency(
dependency_creator=func_id,
target_deployment=target_deployment_id,
target_deployment_func=(
target_deployment_func if not target_deployment_id
else None)
)]
return local_idds, external_idds
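# Added note: when the Component/SharedResource points at an external manager,
# two records are produced per dependency -- a local one carrying the external
# client_config as external_target, and one to be created on the external
# manager pointing back at this deployment as external_source. Otherwise a
# single local record (optionally with target_deployment_func) is enough.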
def _get_external_clients(nodes: list, manager_ips: list):
client_config = None
target_deployment = None
for node in nodes:
if node['type'] in ['cloudify.nodes.Component',
'cloudify.nodes.SharedResource']:
client_config = node['properties'].get('client')
target_deployment = node['properties'].get(
'resource_config').get('deployment')
break
external_client = None
if client_config:
internal_hosts = ({'127.0.0.1', 'localhost'} | set(manager_ips))
host = client_config['host']
host = {host} if type(host) == str else set(host)
if not (host & internal_hosts):
external_client = CloudifyClient(**client_config)
return external_client, client_config, \
target_deployment.get('id') if target_deployment else None
@workflow
def update_deployment(ctx, **kwargs):
"""Run an update on this deployment. Any kwargs are passed to the update.
This exposes deployment update creation as a workflow on the deployment.
"""
client = get_rest_client(tenant=ctx.tenant_name)
deployment_update = \
client.deployment_updates.update_with_existing_blueprint(
deployment_id=ctx.deployment.id,
**kwargs
)
ctx.logger.info('Started update of deployment %s: %s',
ctx.deployment.id, deployment_update.id)
|
the-stack_106_29304 | def func1():
import dtlpy as dl
if dl.token_expired():
dl.login()
def func2():
project = dl.projects.create(project_name='project-sdk-tutorial')
project.datasets.create(dataset_name='dataset-sdk-tutorial')
def func3():
project = dl.projects.get(project_name='project-sdk-tutorial')
dataset = project.datasets.get(dataset_name='dataset-sdk-tutorial')
def func4():
import dtlpy as dl
import cv2
import numpy as np
class ImageProcess(dl.BaseServiceRunner):
@staticmethod
def rgb2gray(item: dl.Item):
"""
Function to convert RGB image to GRAY
Will also add a modality to the original item
:param item: dl.Item to convert
:return: None
"""
buffer = item.download(save_locally=False)
bgr = cv2.imdecode(np.frombuffer(buffer.read(), np.uint8), -1)
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
gray_item = item.dataset.items.upload(local_path=gray,
remote_path='/gray' + item.dir,
remote_name=item.filename)
# add modality
item.modalities.create(name='gray',
ref=gray_item.id)
item.update(system_metadata=True)
@staticmethod
def clahe_equalization(item: dl.Item):
"""
Function to perform histogram equalization (CLAHE)
Will add a modality to the original item
Based on opencv https://docs.opencv.org/4.x/d5/daf/tutorial_py_histogram_equalization.html
:param item: dl.Item to convert
:return: None
"""
buffer = item.download(save_locally=False)
bgr = cv2.imdecode(np.frombuffer(buffer.read(), np.uint8), -1)
# create a CLAHE object (Arguments are optional).
lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
            lab_planes = list(cv2.split(lab))  # list() so the L channel can be reassigned below (cv2.split may return a tuple)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
bgr_equalized = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
bgr_equalized_item = item.dataset.items.upload(local_path=bgr_equalized,
remote_path='/equ' + item.dir,
remote_name=item.filename)
# add modality
item.modalities.create(name='equ',
ref=bgr_equalized_item.id)
item.update(system_metadata=True)
def func5():
modules = [dl.PackageModule(name='image-processing-module',
entry_point='main.py',
class_name='ImageProcess',
functions=[dl.PackageFunction(name='rgb2gray',
description='Converting RGB to gray',
inputs=[dl.FunctionIO(type=dl.PackageInputType.ITEM,
name='item')]),
dl.PackageFunction(name='clahe_equalization',
description='CLAHE histogram equalization',
inputs=[dl.FunctionIO(type=dl.PackageInputType.ITEM,
name='item')])
])]
def func6():
src_path = 'functions/opencv_functions'
project = dl.projects.get(project_name='project-sdk-tutorial')
package = project.packages.push(package_name='image-processing',
modules=modules,
src_path=src_path)
def func7():
service = package.services.deploy(service_name='image-processing',
runtime=dl.KubernetesRuntime(concurrency=32),
module_name='image-processing-module')
def func8():
filters = dl.Filters()
filters.add(field='datasetId', values=dataset.id)
trigger = service.triggers.create(name='image-processing2',
function_name='clahe_equalization',
execution_mode=dl.TriggerExecutionMode.ONCE,
resource=dl.TriggerResource.ITEM,
actions=dl.TriggerAction.CREATED,
filters=filters)
def func9():
trigger = service.triggers.create(name='image-processing-rgb',
function_name='rgb2gray',
execution_mode=dl.TriggerExecutionMode.ALWAYS,
resource=dl.TriggerResource.ITEM,
actions=dl.TriggerAction.UPDATED,
filters=filters)
def func10():
item = dataset.items.upload(
local_path=['https://raw.githubusercontent.com/dataloop-ai/tiny_coco/master/images/train2017/000000463730.jpg'])
# Remote path is optional, images will go to the main directory by default
def func11():
service.log()
def func12():
item.open_in_web()
def func13():
service.pause()
|
the-stack_106_29305 | import os
import pandas as pd
import korbinian
from Bio import SeqIO
# import debugging tools
from korbinian.utils import pr, pc, pn, aaa
def create_nonred_uniprot_flatfile_via_uniref(s, uniprot_dir, selected_uniprot_records_flatfile, logging):
""" Creates a non-redundant UniProt flatfile from redundant redundant UniProt tab files, redundant flatfiles and UniRef cluster tab file.
The final output is the selected list of flatfiles, in the uniprot/selected folder (E.g. List08_selected_uniprot_records_flatfile.txt)
Parameters
----------
s : dict
Settings dictionary extracted from excel settings file.
uniprot_dir : str
Path to databases/uniprot folder.
list_number : int
List number (e.g. 8), determining the input and output files.
selected_uniprot_records_flatfile : str
Path to output UniProt flatfile of selected records. E.g. List08_selected_uniprot_records_flatfile.txt
logging : logging.Logger
Logger for printing to console and logfile.
Saved Files and Figures
-----------------------
selected_uniprot_records_flatfile : flatfile
UniProt flatfile of non-redundant sequences.
"""
logging.info('~~~~~~~~~~~~starting create_nonred_uniprot_flatfile_via_uniref~~~~~~~~~~~~')
# load uniref cutoff used (typically 50, for UniRef50)
uniref_cutoff = s["uniref_cluster_cutoff"]
# define path to csv file containing the list of redundant uniprot accessions, e.g. List08_redundant_list_uniprot_acc.tab
redundant_uniprot_acc_tab = os.path.join(uniprot_dir, "List%02d_redundant_list_uniprot_acc.tab" % s["list_number"])
# define path to uniprot flatfile containing the redundant protein records, e.g. List08_redundant_uniprot_flatfile.txt
redundant_uniprot_flatfile = os.path.join(uniprot_dir, "List%02d_redundant_uniprot_flatfile.txt" % s["list_number"])
# define path to the csv file containing the relevant uniref clusters applicable to this list of proteins, e.g. List08_UniRef50_clusters.tab
uniref_clusters_tab = os.path.join(uniprot_dir, "List%02d_UniRef%02d_clusters.tab" % (s["list_number"], uniref_cutoff))
# output uniprot list with redundancy determined
nonred_uniprot_acc_csv = os.path.join(uniprot_dir, "List%02d_nonred_list_uniprot_acc.csv" % s["list_number"])
result = korbinian.prot_list.uniprot_nonredundant.match_list_uniprot_acc_to_uniref_clusters(redundant_uniprot_acc_tab, uniref_clusters_tab, nonred_uniprot_acc_csv, uniref_cutoff, logging)
if result is None:
logging.warning('~~~~~~~~~~~~create_nonred_uniprot_flatfile_via_uniref NOT DONE~~~~~~~~~~~~')
return None
# reopen output file
dfu = pd.read_csv(nonred_uniprot_acc_csv, index_col=0)
# create a list of uniprot accessions that are nonredundant
list_nonred_acc = list(dfu.loc[dfu['nonred'] == True].index)
# create a uniprot flatfile containing only the desired nonredundant accessions
korbinian.prot_list.uniprot_nonredundant.retrieve_selected_uniprot_records_from_flatfile(list_nonred_acc, redundant_uniprot_flatfile, selected_uniprot_records_flatfile, logging)
logging.info('~~~~~~~~~~~~create_nonred_uniprot_flatfile_via_uniref is finished~~~~~~~~~~~~')
return True
def match_list_uniprot_acc_to_uniref_clusters(redundant_uniprot_acc_tab, uniref_clusters_tab, nonred_uniprot_acc_csv, uniref_cutoff, logging):
""" Assigns UniRef clusters to each acc in a list of UniProt accessions.
    Takes an input list of redundant UniProt accessions (downloaded in .tab format)
For each accession, finds the respective UniRef cluster number, within a UniRef cluster list (downloaded in .tab format)
[note that to speed things up, we've iteratively removed clusters as they are identified, resulting in
redundant proteins whose UniRef representative is not found]
Creates a boolean for each acc, determining whether it is "nonredundant" (nonred) according to the UniRef clusters.
- preferentially labels the UniRef cluster representative as nonredundant
    - if the acc is not the cluster representative, preferentially takes the "reviewed" SwissProt records
Parameters:
-----------
redundant_uniprot_acc_tab : str
Path to redundant uniprot list of accessions (csv downloaded in .tab format from UniProt website)
uniref_clusters_tab : str
Path to list of UniRef clusters (csv downloaded in .tab format from UniProt website)
nonred_uniprot_acc_csv : str
Path for output file, the uniprot list of accessions with annotated UniRef cluster, and redundancy.
uniref_cutoff : int
UniRef % identity cutoff used for that cluster (either 50, 90 or 100).
logging : logging.Logger
Logger for printing to console and logfile.
Saved Files and Figures
-----------------------
nonred_uniprot_acc_csv : csv
csv with the identified cluster_ID for each protein, and whether or not it is classified as redundant
e.g. List01_nonred_list_uniprot_acc.csv
index = UniProt accession
columns = Protein names Gene names Organism Length nonred cluster_ID
"""
if not os.path.isfile(uniref_clusters_tab):
logging.warning("REDUNDANCY REDUCTION NOT POSSIBLE, no uniref clusters found ({})".format(uniref_clusters_tab))
return None
# create a new dataframe with the uniref csv file, containing the accessions of the reference sequences
dfr = pd.read_table(uniref_clusters_tab)
# for backwards compatibility, replace old header 'Cluster member(s)' with a consistent column name
dfr.rename(columns={'Cluster member(s)': 'cluster_members', 'Cluster members': 'cluster_members', 'Cluster ID': 'cluster_ID'}, inplace=True)
# to simplify, keep only the columns with the uniprot accessions
dfr = dfr[['cluster_ID', 'cluster_members']]# 'Organisms'
# remove the isoforms, which for some reason are given separate UniRef numbers (SP|P45880, SP|P45880-1, SP|P45880-2 etc)
dfr['cluster_ID'] = dfr['cluster_ID'].apply(lambda x: x[:-2] if "-" in x[-2:] else x)
dfr = dfr.set_index('cluster_ID', drop=False)
# change the cluster_ID to the index
dfr.index.names = ['cluster_ID']
# extract the representative uniprot accession from each cluster_ID
dfr['cluster_rep'] = dfr['cluster_ID'].str[9:]
# convert the list of uniprot accessions in each cluster to a python list format
#dfr['all_acc_in_cluster'] = dfr['cluster_members'].apply(lambda x : [x.strip() for x in x.split(';')])
# delete the original list of clusters
#dfr.drop("cluster_members", axis=1, inplace=True)
    if not os.path.isfile(redundant_uniprot_acc_tab):
        logging.warning('warning, file with redundant uniprot acc does not exist : %s' % redundant_uniprot_acc_tab)
raise FileNotFoundError()
# open up large csv containing the redundant list of uniprot acc
dfu = pd.read_table(redundant_uniprot_acc_tab)
# set the uniprot acc as the index
dfu = dfu.set_index('Entry', drop=False)
n_initial_records = dfu.shape[0]
##################################################################################################################
# #
# Use intersection to find accessions that are cluster representatives #
# #
##################################################################################################################
# create a set of cluster representatives (e.g. {'UniRef50_Q7Z7M0', 'UniRef50_Q96I36', 'UniRef50_P20333',....}
cluster_reps_set = set(dfr['cluster_rep'])
# create a set of all the proteins in orig redundant list
red_protein_acc_set = set(dfu.index)
# use intersection to find the acc that are cluster representatives
cluster_reps_acc_set = cluster_reps_set.intersection(red_protein_acc_set)
# annotate the cluster representatives as non-redundant, and add the cluster ID
dfu.loc[cluster_reps_acc_set, "nonred"] = True
dfu.loc[cluster_reps_acc_set, "cluster_ID"] = dfu.loc[cluster_reps_acc_set,:].Entry.apply(lambda x : "UniRef{}_{}".format(uniref_cutoff, x))
# collect a set of the cluster IDs corresponding to the representatives in the list
reps_cluster_ID_set = set(dfu.loc[cluster_reps_acc_set, "cluster_ID"])
n_prot_that_are_ref_seqs = len(reps_cluster_ID_set)
##################################################################################################################
# #
# For non representatives, search each cluster separately to see if it contains the accession #
# #
##################################################################################################################
# OPTIONAL: to make the check faster, drop any of the UniRef clusters whose reference protein was in the non-redundant list
# NOTE: this means that there will be less clusters to search, BUT, you won't find the cluster for proteins who belong
# to a cluster, where the representative protein was in the original list. These will be labelled "not found".
unassigned = set(dfr.index) - reps_cluster_ID_set
dfr_unassigned = dfr.loc[unassigned, :].copy()
n_clusters = dfr.shape[0]
logging.info("Number of initial redundant proteins : {}\n"
"Number of uniref clusters : {}\n"
"Number of proteins that were ref seqs for a cluster : {}".format(n_initial_records, n_clusters, n_prot_that_are_ref_seqs))
##################################################################################################################
# #
# FOR EACH CLUSTER REPRESENTATIVE, GRAB THE INTERSECTION OF CLUSTER ACC and LIST ACC #
# #
##################################################################################################################
# split into accessions and convert to a set
dfr_unassigned["cluster_members"] = dfr_unassigned.cluster_members.str.split("; ")
dfr_unassigned["cluster_members"] = dfr_unassigned["cluster_members"].apply(lambda x : set(x))
# create a set of the accessions that still need to be assigned
set_red_acc = set(dfu.loc[dfu.nonred.isnull()].index)
# make a new column, showing the intersection between the list of acc in that cluster, and the list of acc of unassigned proteins
dfr_unassigned["acc_intersection_cluster_and_unassigned"] = dfr_unassigned["cluster_members"].apply(lambda x: x.intersection(set_red_acc))
# drop any UniRef clusters that don't contain a reference to the remaining unassigned accessions
dfr_unassigned = dfr_unassigned.loc[dfr_unassigned["acc_intersection_cluster_and_unassigned"] != set()]
##################################################################################################################
# #
# Reverse the dataset so the ACC is the index, and the ClusterIDs are the values in a new dataframe #
# #
##################################################################################################################
# create a new dataframe to
df_acc_to_cluster = pd.DataFrame()
# iterate through the unassigned cluster IDs
for cluster_ID in dfr_unassigned.index:
# grab the set of acc for that cluster, which are also in the protein list
acc_intersection_cluster_and_unassigned = dfr_unassigned.loc[cluster_ID, "acc_intersection_cluster_and_unassigned"]
# for some reason, there are sometimes redundant UniRef clusters!!
if isinstance(acc_intersection_cluster_and_unassigned, pd.Series):
# take the first list as the correct one
acc_intersection_cluster_and_unassigned = acc_intersection_cluster_and_unassigned.iloc[0]
# skip the empty sets
if acc_intersection_cluster_and_unassigned != set():
counter = 0
# the multiple acc all have the same cluster ID, which can be assigned in the new dataframe
for acc in acc_intersection_cluster_and_unassigned:
df_acc_to_cluster.loc[acc, "cluster_ID"] = cluster_ID
# IF THERE ARE MORE THAN 2, TRY TO TAKE THE "REVIEWED" PROTEIN FROM SWISSPROT
if len(acc_intersection_cluster_and_unassigned) >= 2:
if "Status" in dfu.columns:
# first make a series whether they are "reviewed" or "unreviewed"
reviewed_ser = dfu.loc[acc_intersection_cluster_and_unassigned, "Status"]
# sort the series so that the "reviewed" proteins are at the top
reviewed_ser.sort_values(inplace=True)
for acc in reviewed_ser.index:
if counter == 0:
# take the first protein, which should be "reviewed", if available
df_acc_to_cluster.loc[acc, "nonred"] = True
else:
# label all following proteins as redundant, to be excluded from the final list
df_acc_to_cluster.loc[acc, "nonred"] = False
counter += 1
else:
# there is no reviewed or unreviewed status in the cluster data
# simply take the first acc as the non-redundant, and mark the others as redundant
for acc in acc_intersection_cluster_and_unassigned:
if counter == 0:
# take the first protein, which should be "reviewed", if available
df_acc_to_cluster.loc[acc, "nonred"] = True
else:
# label all following proteins as redundant, to be excluded from the final list
df_acc_to_cluster.loc[acc, "nonred"] = False
counter += 1
##################################################################################################################
# #
# Join the "nonred" and "cluster_ID" series from the two methods: #
# 1) cluster representatives #
# 2) taking the first (or reviewed) from the non-cluster representatives #
# #
##################################################################################################################
# series 1
nonred_from_reps = dfu["nonred"].dropna()
# series 2
if 'nonred' in df_acc_to_cluster.columns:
nonred_from_cluster_search = df_acc_to_cluster["nonred"].dropna()
else:
nonred_from_cluster_search = df_acc_to_cluster
# join series
complete_series_nonred_annotation = nonred_from_reps.append(nonred_from_cluster_search)
# add to output file
dfu["nonred"] = complete_series_nonred_annotation
# series 1
cluster_IDs_from_reps_ser = dfu["cluster_ID"].dropna()
# series 2, join series
complete_series_cluster_IDs = cluster_IDs_from_reps_ser.append(df_acc_to_cluster["cluster_ID"].dropna())
# add to output file
dfu["cluster_ID"] = complete_series_cluster_IDs
##################################################################################################################
# #
# save list and print record numbers #
# #
##################################################################################################################
# save list after redundancy check
dfu.to_csv(nonred_uniprot_acc_csv)
# log the number of redundant an nonredundant accessions
nonred_value_counts = dfu['nonred'].value_counts()
number_nonredundant_records = nonred_value_counts[True]
#number_total_records = dfu.shape[0]
number_redundant_records = n_initial_records - number_nonredundant_records
if "not_found" in dfu["cluster_ID"].tolist():
number_acc_not_found_in_UniRef = dfu["cluster_ID"].value_counts()["not_found"]
acc_not_found_list = list(dfu["cluster_ID"].isnull().index)
logging.info("{} acc not found in UniRef clusters\n{}".format(number_acc_not_found_in_UniRef, acc_not_found_list))
logging.info('final number of non-redundant records : {}\n({} redundant removed)'.format(number_nonredundant_records, number_redundant_records))
logging.info("~~~~~~~~~~~~match_list_uniprot_acc_to_uniref_clusters is finished~~~~~~~~~~~~")
return True
def retrieve_selected_uniprot_records_from_flatfile(input_accession_list, large_input_uniprot_flatfile, output_flatfile, logging):
'''Function to select records from a large uniprot flatfile, and save them as a smaller flatfile of selected records.
Parameters:
-----------
input_accession_list : list
List of UniProt accessions.
large_input_uniprot_flatfile : str
Path to large UniProt flatfile, from which the UniProt flatfiles will be extracted.
output_flatfile : str
Path to output UniProt flatfile of selected records.
logging : logging.Logger
Logger for printing to console and logfile.
Notes:
------
This script is quite slow for large lists, and large initial flatfiles.
'''
#accession_list = [line.strip() for line in open(input_accession_list, "r")]
#open input flatfile
uniprot_index_handle = SeqIO.index(large_input_uniprot_flatfile, "swiss")
#create an empty list to hold all the accessions that are not in the flatfile
list_acc_not_in_flatfile = []
n = 0
with open(output_flatfile, "wb") as output:
for n, acc in enumerate(input_accession_list):
try:
#get the raw uniprot record, and write to the output file
output.write(uniprot_index_handle.get_raw(acc))
#if the record is not in the file
except KeyError:
list_acc_not_in_flatfile.append(acc)
if n % 50 == 0:
if n!= 0:
logging.info('%i records retrieved from large flatfile' % n)
logging.info('{} records retrieved from large flatfile ({} not found)'.format(n + 1, len(list_acc_not_in_flatfile)))
if len(list_acc_not_in_flatfile) > 0:
logging.info("SwissProt records not found in {}:\n{}.".format(large_input_uniprot_flatfile, list_acc_not_in_flatfile))
logging.info("~~~~~~~~~~~~retrieve_selected_uniprot_records_from_flatfile is finished~~~~~~~~~~~~")
|
the-stack_106_29308 | import datetime
import pytest
from blackbox import config
from blackbox import noop_pg_backup_statements
from blackbox import small_push_dir
from gs_integration_help import default_test_gs_bucket
from os import path
from s3_integration_help import default_test_bucket
from stage_pgxlog import pg_xlog
# Quiet pyflakes about pytest fixtures.
assert config
assert default_test_bucket
assert default_test_gs_bucket
assert noop_pg_backup_statements
assert pg_xlog
assert small_push_dir
def test_wal_push_fetch(pg_xlog, tmpdir, config):
contents = 'abcdefghijlmnopqrstuvwxyz\n' * 10000
seg_name = '00000001' * 3
pg_xlog.touch(seg_name, '.ready')
pg_xlog.seg(seg_name).write(contents)
config.main('wal-push', 'pg_xlog/' + seg_name)
# Recall file and check for equality.
download_file = tmpdir.join('TEST-DOWNLOADED')
config.main('wal-fetch', '-p0', seg_name, str(download_file))
assert download_file.read() == contents
config.main('wal-prefetch', path.dirname(str(download_file)), seg_name)
assert tmpdir.join('.wal-e', 'prefetch', seg_name).check(file=1)
def test_wal_push_parallel(pg_xlog, config, monkeypatch):
from wal_e.worker import upload
old_info = upload.logger.info
class GatherActions(object):
def __init__(self):
self.actions = set()
def __call__(self, *args, **kwargs):
s = kwargs['structured']
self.actions.add((s['action'], s['state']))
return old_info(*args, **kwargs)
ga = GatherActions()
monkeypatch.setattr(upload.logger, 'info', ga)
def seg_name(*parts):
return ''.join(str(p).zfill(8) for p in parts)
segments = [seg_name(1, 1, x) for x in range(1, 4)]
for s in segments:
pg_xlog.touch(s, '.ready')
# Prepare the second segment with *only* a ready file, to make
# sure parallel-push doesn't crash when pg_xlog's file is missing.
pg_xlog.seg(segments[1]).remove()
# This push has enough parallelism that it should attempt all the
# wal segments staged.
config.main('wal-push', '-p8', 'pg_xlog/' + segments[0])
# Ensure all three action types, particularly the "skip" state,
# are encountered.
assert ga.actions == set([('push-wal', 'begin'),
('push-wal', 'skip'),
('push-wal', 'complete')])
# An explicit request to upload a segment that doesn't exist must
# yield a failure.
#
# NB: Normally one would use pytest.raises, but in this case,
# e.value was *sometimes* giving an integer value, and sometimes
# the SystemExit value, whereas the builtin try/except constructs
# appear reliable by comparison.
try:
config.main('wal-push', '-p8', 'pg_xlog/' + segments[1])
except SystemExit as e:
assert e.code == 1
else:
assert False
def test_wal_fetch_non_existent(tmpdir, config):
# Recall file and check for equality.
download_file = tmpdir.join('TEST-DOWNLOADED')
with pytest.raises(SystemExit) as e:
config.main('wal-fetch', '-p0', 'irrelevant', str(download_file))
assert e.value.code == 1
def test_backup_push_fetch(tmpdir, small_push_dir, monkeypatch, config,
noop_pg_backup_statements):
import wal_e.tar_partition
# check that _fsync_files() is called with the right
# arguments. There's a separate unit test in test_tar_hacks.py
# that it actually fsyncs the right files.
fsynced_files = []
monkeypatch.setattr(wal_e.tar_partition, '_fsync_files',
lambda filenames: fsynced_files.extend(filenames))
config.main('backup-push', str(small_push_dir))
fetch_dir = tmpdir.join('fetch-to').ensure(dir=True)
# Spin around backup-fetch LATEST for a while to paper over race
# conditions whereby a backup may not be visible to backup-fetch
# immediately.
from boto import exception
start = datetime.datetime.now()
deadline = start + datetime.timedelta(seconds=15)
while True:
try:
config.main('backup-fetch', str(fetch_dir), 'LATEST')
except exception.S3ResponseError:
if datetime.datetime.now() > deadline:
raise
else:
continue
else:
break
assert fetch_dir.join('arbitrary-file').read() == \
small_push_dir.join('arbitrary-file').read()
for filename in fetch_dir.listdir():
if filename.check(link=0):
assert str(filename) in fsynced_files
elif filename.check(link=1):
assert str(filename) not in fsynced_files
def test_delete_everything(config, small_push_dir, noop_pg_backup_statements):
config.main('backup-push', str(small_push_dir))
config.main('delete', '--confirm', 'everything')
|
the-stack_106_29310 | import numpy as np
import pytest
import taichi as ti
@pytest.mark.skipif(not ti.has_pytorch(), reason='Pytorch not installed.')
@ti.test(exclude=ti.opengl)
def test_ndarray_2d():
n = 4
m = 7
@ti.kernel
def run(x: ti.any_arr(), y: ti.any_arr()):
for i in range(n):
for j in range(m):
x[i, j] += i + j + y[i, j]
a = ti.ndarray(ti.i32, shape=(n, m))
for i in range(n):
for j in range(m):
a[i, j] = i * j
b = np.ones((n, m), dtype=np.int32)
run(a, b)
for i in range(n):
for j in range(m):
assert a[i, j] == i * j + i + j + 1
run(b, a)
for i in range(n):
for j in range(m):
assert b[i, j] == i * j + (i + j + 1) * 2
|
the-stack_106_29312 | import sys
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import shutil
from torch.autograd import Variable
from torch.utils import data
import os
from dataset import IC15Loader
from metrics import runningScore
import models
from util import Logger, AverageMeter
import time
import util
def ohem_single(score, gt_text, training_mask):
pos_num = (int)(np.sum(gt_text > 0.5)) - (int)(np.sum((gt_text > 0.5) & (training_mask <= 0.5)))
if pos_num == 0:
# selected_mask = gt_text.copy() * 0 # may be not good
selected_mask = training_mask
selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
return selected_mask
neg_num = (int)(np.sum(gt_text <= 0.5))
neg_num = (int)(min(pos_num * 3, neg_num))
if neg_num == 0:
selected_mask = training_mask
selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
return selected_mask
neg_score = score[gt_text <= 0.5]
neg_score_sorted = np.sort(-neg_score)
threshold = -neg_score_sorted[neg_num - 1]
selected_mask = ((score >= threshold) | (gt_text > 0.5)) & (training_mask > 0.5)
selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
return selected_mask
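# Added note: OHEM keeps every positive pixel plus at most 3x as many of the
# hardest negatives (those with the highest score among gt_text <= 0.5); all
# other pixels are zeroed in the returned mask and ignored by the loss.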
def ohem_batch(scores, gt_texts, training_masks):
scores = scores.data.cpu().numpy()
gt_texts = gt_texts.data.cpu().numpy()
training_masks = training_masks.data.cpu().numpy()
selected_masks = []
for i in range(scores.shape[0]):
selected_masks.append(ohem_single(scores[i, :, :], gt_texts[i, :, :], training_masks[i, :, :]))
selected_masks = np.concatenate(selected_masks, 0)
selected_masks = torch.from_numpy(selected_masks).float()
return selected_masks
def dice_loss(input, target, mask):
input = torch.sigmoid(input)
input = input.contiguous().view(input.size()[0], -1)
target = target.contiguous().view(target.size()[0], -1)
mask = mask.contiguous().view(mask.size()[0], -1)
input = input * mask
target = target * mask
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + 0.001
c = torch.sum(target * target, 1) + 0.001
d = (2 * a) / (b + c)
dice_loss = torch.mean(d)
return 1 - dice_loss
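# Hedged sanity check (illustration only, not used by training): dice_loss
# expects raw logits and applies sigmoid internally, e.g.
#   x = torch.full((1, 8, 8), 10.0)  # logits strongly predicting "text"
#   t = torch.ones((1, 8, 8))        # ground truth all text
#   m = torch.ones((1, 8, 8))        # nothing masked out
#   dice_loss(x, t, m)               # ~0, since sigmoid(10) ~ 1 and d ~ 1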
def cal_text_score(texts, gt_texts, training_masks, running_metric_text):
training_masks = training_masks.data.cpu().numpy()
pred_text = torch.sigmoid(texts).data.cpu().numpy() * training_masks
pred_text[pred_text <= 0.5] = 0
pred_text[pred_text > 0.5] = 1
pred_text = pred_text.astype(np.int32)
gt_text = gt_texts.data.cpu().numpy() * training_masks
gt_text = gt_text.astype(np.int32)
running_metric_text.update(gt_text, pred_text)
score_text, _ = running_metric_text.get_scores()
return score_text
def cal_kernel_score(kernels, gt_kernels, gt_texts, training_masks, running_metric_kernel):
mask = (gt_texts * training_masks).data.cpu().numpy()
kernel = kernels[:, -1, :, :]
gt_kernel = gt_kernels[:, -1, :, :]
pred_kernel = torch.sigmoid(kernel).data.cpu().numpy()
pred_kernel[pred_kernel <= 0.5] = 0
pred_kernel[pred_kernel > 0.5] = 1
pred_kernel = (pred_kernel * mask).astype(np.int32)
gt_kernel = gt_kernel.data.cpu().numpy()
gt_kernel = (gt_kernel * mask).astype(np.int32)
running_metric_kernel.update(gt_kernel, pred_kernel)
score_kernel, _ = running_metric_kernel.get_scores()
return score_kernel
def train(train_loader, model, criterion, optimizer, epoch):
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
running_metric_text = runningScore(2)
running_metric_kernel = runningScore(2)
end = time.time()
for batch_idx, (imgs, gt_texts, gt_kernels, training_masks) in enumerate(train_loader):
data_time.update(time.time() - end)
imgs = Variable(imgs.cuda())
gt_texts = Variable(gt_texts.cuda())
gt_kernels = Variable(gt_kernels.cuda())
training_masks = Variable(training_masks.cuda())
outputs = model(imgs)
texts = outputs[:, 0, :, :]
kernels = outputs[:, 1:, :, :]
selected_masks = ohem_batch(texts, gt_texts, training_masks)
selected_masks = Variable(selected_masks.cuda())
loss_text = criterion(texts, gt_texts, selected_masks)
loss_kernels = []
mask0 = torch.sigmoid(texts).data.cpu().numpy()
mask1 = training_masks.data.cpu().numpy()
selected_masks = ((mask0 > 0.5) & (mask1 > 0.5)).astype('float32')
selected_masks = torch.from_numpy(selected_masks).float()
selected_masks = Variable(selected_masks.cuda())
for i in range(6):
kernel_i = kernels[:, i, :, :]
gt_kernel_i = gt_kernels[:, i, :, :]
loss_kernel_i = criterion(kernel_i, gt_kernel_i, selected_masks)
loss_kernels.append(loss_kernel_i)
loss_kernel = sum(loss_kernels) / len(loss_kernels)
loss = 0.7 * loss_text + 0.3 * loss_kernel
losses.update(loss.item(), imgs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
score_text = cal_text_score(texts, gt_texts, training_masks, running_metric_text)
score_kernel = cal_kernel_score(kernels, gt_kernels, gt_texts, training_masks, running_metric_kernel)
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % 20 == 0:
output_log = '({batch}/{size}) Batch: {bt:.3f}s | TOTAL: {total:.0f}min | ETA: {eta:.0f}min | Loss: {loss:.4f} | Acc_t: {acc: .4f} | IOU_t: {iou_t: .4f} | IOU_k: {iou_k: .4f}'.format(
batch=batch_idx + 1,
size=len(train_loader),
bt=batch_time.avg,
total=batch_time.avg * batch_idx / 60.0,
eta=batch_time.avg * (len(train_loader) - batch_idx) / 60.0,
loss=losses.avg,
acc=score_text['Mean Acc'],
iou_t=score_text['Mean IoU'],
iou_k=score_kernel['Mean IoU'])
print(output_log)
sys.stdout.flush()
return (
losses.avg, score_text['Mean Acc'], score_kernel['Mean Acc'], score_text['Mean IoU'], score_kernel['Mean IoU'])
def adjust_learning_rate(args, optimizer, epoch):
global state
if epoch in args.schedule:
args.lr = args.lr * 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
def save_checkpoint(state, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
def main(args):
if args.checkpoint == '':
args.checkpoint = "checkpoints/ic15_%s_bs_%d_ep_%d" % (args.arch, args.batch_size, args.n_epoch)
if args.pretrain:
if 'synth' in args.pretrain:
args.checkpoint += "_pretrain_synth"
else:
args.checkpoint += "_pretrain_ic17"
print('checkpoint path: %s' % args.checkpoint)
print('init lr: %.8f' % args.lr)
print('schedule: ', args.schedule)
sys.stdout.flush()
if not os.path.isdir(args.checkpoint):
os.makedirs(args.checkpoint)
kernel_num = 7
min_scale = 0.4
start_epoch = 0
data_loader = IC15Loader(is_transform=True, img_size=args.img_size, kernel_num=kernel_num, min_scale=min_scale)
train_loader = torch.utils.data.DataLoader(
data_loader,
batch_size=args.batch_size,
shuffle=True,
num_workers=3,
drop_last=True,
pin_memory=True)
if args.arch == "resnet50":
model = models.resnet50(pretrained=True, num_classes=kernel_num)
elif args.arch == "resnet101":
model = models.resnet101(pretrained=True, num_classes=kernel_num)
elif args.arch == "resnet152":
model = models.resnet152(pretrained=True, num_classes=kernel_num)
model = torch.nn.DataParallel(model).cuda()
if hasattr(model.module, 'optimizer'):
optimizer = model.module.optimizer
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.99, weight_decay=5e-4)
title = 'icdar2015'
if args.pretrain:
print('Using pretrained model.')
assert os.path.isfile(args.pretrain), 'Error: no checkpoint directory found!'
checkpoint = torch.load(args.pretrain)
model.load_state_dict(checkpoint['state_dict'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])
elif args.resume:
print('Resuming from checkpoint.')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
print('Training from scratch.')
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])
for epoch in range(start_epoch, args.n_epoch):
adjust_learning_rate(args, optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.n_epoch, optimizer.param_groups[0]['lr']))
train_loss, train_te_acc, train_ke_acc, train_te_iou, train_ke_iou = train(train_loader, model, dice_loss,
optimizer, epoch)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'lr': args.lr,
'optimizer': optimizer.state_dict(),
}, checkpoint=args.checkpoint)
logger.append([optimizer.param_groups[0]['lr'], train_loss, train_te_acc, train_te_iou])
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--arch', nargs='?', type=str, default='resnet50')
parser.add_argument('--img_size', nargs='?', type=int, default=640,
help='Height of the input image')
parser.add_argument('--n_epoch', nargs='?', type=int, default=600,
help='# of the epochs')
parser.add_argument('--schedule', type=int, nargs='+', default=[200, 400],
help='Decrease learning rate at these epochs.')
parser.add_argument('--batch_size', nargs='?', type=int, default=16,
help='Batch Size')
parser.add_argument('--lr', nargs='?', type=float, default=1e-3,
help='Learning Rate')
parser.add_argument('--resume', nargs='?', type=str, default=None,
help='Path to previous saved model to restart from')
parser.add_argument('--pretrain', nargs='?', type=str, default=None,
help='Path to previous saved model to restart from')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
args = parser.parse_args()
main(args)
|
the-stack_106_29313 | import logging
import sdk_cmd
LOG = logging.getLogger(__name__)
def add_acls(user: str, task: str, topic: str, zookeeper_endpoint: str, env_str=None):
"""
    Add Producer and Consumer ACLs for the specified user and topic
"""
_add_role_acls("producer", user, task, topic, zookeeper_endpoint, env_str)
_add_role_acls("consumer --group=*", user, task, topic, zookeeper_endpoint, env_str)
def _add_role_acls(role: str, user: str, task: str, topic: str, zookeeper_endpoint: str, env_str=None):
cmd = "bash -c \"{setup_env}kafka-acls \
--topic {topic_name} \
--authorizer-properties zookeeper.connect={zookeeper_endpoint} \
--add \
--allow-principal User:{user} \
--{role}\"".format(setup_env="{} && ".format(env_str) if env_str else "",
topic_name=topic,
zookeeper_endpoint=zookeeper_endpoint,
user=user,
role=role)
LOG.info("Running: %s", cmd)
output = sdk_cmd.task_exec(task, cmd)
LOG.info(output)
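# Added illustration: for user "alice", topic "t1" and role "producer" the
# rendered command is roughly
#   kafka-acls --topic t1 --authorizer-properties zookeeper.connect=<zk> \
#       --add --allow-principal User:alice --producer
# and it is executed inside the given task via sdk_cmd.task_exec(task, cmd).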
def filter_empty_offsets(offsets: list, additional: list=[]) -> list:
ignored_offsets = [None, {}, {"0": ""}]
ignored_offsets.extend(additional)
LOG.info("Filtering %s from %s", ignored_offsets, offsets)
remaining = [o for o in offsets if o not in ignored_offsets]
LOG.info("Remaining offsets: %s", remaining)
return remaining
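# Added illustration: offsets treated as "empty" (None, {}, {"0": ""}, plus any
# values passed via `additional`) are dropped before comparison, e.g.
#   filter_empty_offsets([None, {}, {"0": ""}, {"0": "10"}]) -> [{"0": "10"}]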
|
the-stack_106_29314 | from tkinter import *
from PIL import ImageTk, Image # Picture Processing library
"""
Instantiate new window and define its properties
"""
# Create Window
root = Tk()
# Create Title
root.title('ME362')
# Define Default Window Size
root.geometry("800x800") #(Width x Height)
# root.state('zoomed') #Fullscreen
"""
Widgets
"""
# Label
aFont = ("Century Gothic", 30)
test_label1 = Label(root, text="This is a label", font=aFont) #(window location, the label)
test_label1.grid(row=0, column=0) #(grid position of the widgets)
test_label2 = Label(root, text="This is also a label") #(window location, the label)
test_label2.grid(row=1, column=0) #(grid position of the widgets)
# Frame
frame = LabelFrame(root)
frame.grid(row=0, column=1, padx= 30, pady=32, ipadx=60, ipady=62)
label_inframe = Label(frame, text="This is also a label") #(window location, the label)
label_inframe.grid(row=1, column=0) #(grid position of the widgets)
#button
n = 0
def a_function(an_argument):
print(f'current value = {an_argument}')
global n
n += 1
myButton = Button(
root, # Location
text="Button", # Text in the button
font=('Arial', 15), # Font style
command= lambda: a_function(n), # Function ran on click
fg = 'blue', # foreground color (in this case the text)
bg = 'salmon', # background color
activebackground='red' # background color while pressed
)
myButton.grid(row = 2, column = 0)
# Entry Boxes
entry = Entry(
root,
width = 20,
font = aFont,
justify="center",
state='readonly'
)
entry.grid(row=1, column=1)
entry.configure(state='normal')
entry.insert(0, '23')
entry.configure(state='readonly')
value = entry.get()
print(value)
# Check Box
def checkFunction(var):
print(var)
varCheck = StringVar()
varCheck.set("On")
checkBox = Checkbutton(
root,
text='Switch',
onvalue= "On",
offvalue= "Off",
variable=varCheck,
command= lambda: checkFunction(varCheck.get())
)
checkBox.grid(row = 2, column= 1)
# Sliders
def scaleFunction(args):
global slider
global sliderEntry
sliderEntry.configure(state='normal')
sliderEntry.delete(0, END)
sliderEntry.insert(0, f'{10**(slider.get()/10):.2f}')
sliderEntry.configure(state='readonly')
sliderFrame = LabelFrame(root)
sliderFrame.grid(row=3, column=0, padx= 30, pady=32, ipadx=60, ipady=62)
slider = Scale(
sliderFrame,
from_=-20,
to=20,
orient="horizontal",
width=20,
length=200,
showvalue=0,
command=scaleFunction
)
slider.grid(row=4, column=1)
sliderEntry = Entry(
sliderFrame,
width = 10,
borderwidth= 5,
font = ('verdana', 15),
justify="center",
state='readonly'
)
sliderEntry.grid(row=1, column=1)
sliderEntry.configure(state='normal')
sliderEntry.insert(0, 1.00)
sliderEntry.configure(state='readonly')
# Radio Button
radioFrame = LabelFrame(root)
radioFrame.grid(row=4, column=0, padx= 30, pady=32)
colorMapVar = StringVar()
colorMapVar.set('jet')
radioJet = Radiobutton(
radioFrame,
variable=colorMapVar,
text='Jet',
value='jet',
font=('arial',10))
radioCopper = Radiobutton(
radioFrame,
variable=colorMapVar,
text='Copper',
value='copper')
radioCool = Radiobutton(
radioFrame,
variable=colorMapVar,
text='Cool',
value='cool')
# Optionally, use an image instead of text beside the radio button
# imageJet = Image.open('jet.png')
# imageCopper = Image.open('copper.png')
# imageCool = Image.open('cool.png')
# imageJet = ImageTk.PhotoImage(imageJet.resize((100,20)))
# imageCopper = ImageTk.PhotoImage(imageCopper.resize((100,20)))
# imageCool = ImageTk.PhotoImage(imageCool.resize((100,20)))
# radioJet.configure(image=imageJet)
# radioCopper.configure(image=imageCopper)
# radioCool.configure(image=imageCool)
radioJet.grid(row=0, column=0)
radioCopper.grid(row=1, column=0)
radioCool.grid(row=2, column=0)
# Keep it open
root.mainloop() |
the-stack_106_29315 | import collections
import filecmp
import logging
import os
import shutil
import pytest
from mock import patch
from dvc.cache.base import CloudCache
from dvc.dvcfile import DVC_FILE_SUFFIX, PIPELINE_FILE, Dvcfile
from dvc.exceptions import (
CheckoutError,
CheckoutErrorSuggestGit,
ConfirmRemoveError,
DvcException,
NoOutputOrStageError,
)
from dvc.main import main
from dvc.remote.base import Remote
from dvc.repo import Repo as DvcRepo
from dvc.stage import Stage
from dvc.stage.exceptions import StageFileDoesNotExistError
from dvc.system import System
from dvc.tree.local import LocalTree
from dvc.tree.s3 import S3Tree
from dvc.utils import relpath
from dvc.utils.fs import walk_files
from dvc.utils.yaml import dump_yaml, load_yaml
from tests.basic_env import TestDvc, TestDvcGit
from tests.func.test_repro import TestRepro
from tests.remotes import S3
logger = logging.getLogger("dvc")
class TestCheckout(TestRepro):
def setUp(self):
super().setUp()
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.data_dir_stage = stages[0]
self.assertTrue(self.data_dir_stage is not None)
self.orig = "orig"
shutil.copy(self.FOO, self.orig)
os.unlink(self.FOO)
self.orig_dir = "orig_dir"
shutil.copytree(self.DATA_DIR, self.orig_dir)
shutil.rmtree(self.DATA_DIR)
def test(self):
self.dvc.checkout(force=True)
self._test_checkout()
def _test_checkout(self):
self.assertTrue(os.path.isfile(self.FOO))
self.assertTrue(filecmp.cmp(self.FOO, self.orig, shallow=False))
class TestCheckoutSingleStage(TestCheckout):
def test(self):
ret = main(["checkout", "--force", self.foo_stage.path])
self.assertEqual(ret, 0)
ret = main(["checkout", "--force", self.data_dir_stage.path])
self.assertEqual(ret, 0)
self._test_checkout()
class TestCheckoutCorruptedCacheFile(TestRepro):
def test(self):
cache = self.foo_stage.outs[0].cache_path
os.chmod(cache, 0o644)
with open(cache, "a") as fd:
fd.write("1")
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
self.assertFalse(os.path.isfile(self.FOO))
self.assertFalse(os.path.isfile(cache))
class TestCheckoutCorruptedCacheDir(TestDvc):
def test(self):
# NOTE: using 'copy' so that cache and link don't have same inode
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
self.dvc = DvcRepo(".")
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertEqual(len(stages[0].outs), 1)
out = stages[0].outs[0]
# NOTE: modifying cache file for one of the files inside the directory
# to check if dvc will detect that the cache is corrupted.
entry = self.dvc.cache.local.load_dir_cache(out.checksum)[0]
entry_hash = entry[self.dvc.cache.local.tree.PARAM_CHECKSUM]
cache = os.fspath(
self.dvc.cache.local.tree.hash_to_path_info(entry_hash)
)
os.chmod(cache, 0o644)
with open(cache, "w+") as fobj:
fobj.write("1")
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
self.assertFalse(os.path.exists(cache))
class TestCmdCheckout(TestCheckout):
def test(self):
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self._test_checkout()
class CheckoutBase(TestDvcGit):
GIT_IGNORE = ".gitignore"
def commit_data_file(self, fname, content="random text"):
with open(fname, "w") as fd:
fd.write(content)
stages = self.dvc.add(fname)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
self.dvc.scm.add([fname + ".dvc", ".gitignore"])
self.dvc.scm.commit("adding " + fname)
def read_ignored(self):
with open(self.GIT_IGNORE) as f:
return [s.strip("\n") for s in f.readlines()]
def outs_info(self, stage):
FileInfo = collections.namedtuple("FileInfo", "path inode")
paths = [
path
for output in stage["outs"]
for path in self.dvc.tree.walk_files(output["path"])
]
return [
FileInfo(path=path, inode=System.inode(path)) for path in paths
]
class TestRemoveFilesWhenCheckout(CheckoutBase):
def test(self):
fname = "file_in_a_branch"
branch_master = "master"
branch_1 = "b1"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
# add the file into a separate branch
self.dvc.scm.checkout(branch_1, True)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.commit_data_file(fname)
# Checkout back in master
self.dvc.scm.checkout(branch_master)
self.assertTrue(os.path.exists(fname))
# Make sure `dvc checkout` removes the file
# self.dvc.checkout()
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.assertFalse(os.path.exists(fname))
class TestCheckoutCleanWorkingDir(CheckoutBase):
@patch("dvc.prompt.confirm", return_value=True)
def test(self, mock_prompt):
mock_prompt.return_value = True
stages = self.dvc.add(self.DATA_DIR)
stage = stages[0]
working_dir_change = os.path.join(self.DATA_DIR, "not_cached.txt")
with open(working_dir_change, "w") as f:
f.write("not_cached")
ret = main(["checkout", stage.relpath])
self.assertEqual(ret, 0)
self.assertFalse(os.path.exists(working_dir_change))
@patch("dvc.prompt.confirm", return_value=False)
def test_force(self, mock_prompt):
mock_prompt.return_value = False
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
stage = stages[0]
working_dir_change = os.path.join(self.DATA_DIR, "not_cached.txt")
with open(working_dir_change, "w") as f:
f.write("not_cached")
ret = main(["checkout", stage.relpath])
self.assertNotEqual(ret, 0)
mock_prompt.assert_called()
self.assertNotEqual(ret, 0)
self.assertRaises(DvcException)
class TestCheckoutSelectiveRemove(CheckoutBase):
def test(self):
# Use copy to test for changes in the inodes
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(0, ret)
stage_path = self.DATA_DIR + DVC_FILE_SUFFIX
stage = load_yaml(stage_path)
staged_files = self.outs_info(stage)
# Move instead of remove, to lock the inode assigned to staged_files[0].path.
# If we were to use remove, we might end up with the same inode assigned to
# the newly checked out file.
shutil.move(staged_files[0].path, "random_name")
ret = main(["checkout", "--force", stage_path])
self.assertEqual(ret, 0)
checkedout_files = self.outs_info(stage)
self.assertEqual(len(staged_files), len(checkedout_files))
self.assertEqual(staged_files[0].path, checkedout_files[0].path)
self.assertNotEqual(staged_files[0].inode, checkedout_files[0].inode)
self.assertEqual(staged_files[1].inode, checkedout_files[1].inode)
class TestGitIgnoreBasic(CheckoutBase):
def test(self):
fname1 = "file_1"
fname2 = "file_2"
fname3 = "file_3"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
self.assertFalse(os.path.exists(self.GIT_IGNORE))
self.commit_data_file(fname1)
self.commit_data_file(fname2)
self.dvc.run(
single_stage=True,
cmd=f"python {self.CODE} {self.FOO} {fname3}",
deps=[self.CODE, self.FOO],
outs_no_cache=[fname3],
)
self.assertTrue(os.path.exists(self.GIT_IGNORE))
ignored = self.read_ignored()
self.assertEqual(len(ignored), 2)
self.assertIn("/" + fname1, ignored)
self.assertIn("/" + fname2, ignored)
class TestGitIgnoreWhenCheckout(CheckoutBase):
def test(self):
fname_master = "file_in_a_master"
branch_master = "master"
fname_branch = "file_in_a_branch"
branch_1 = "b1"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
self.commit_data_file(fname_master)
self.dvc.scm.checkout(branch_1, True)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.commit_data_file(fname_branch)
self.dvc.scm.checkout(branch_master)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
ignored = self.read_ignored()
self.assertEqual(len(ignored), 1)
self.assertIn("/" + fname_master, ignored)
self.dvc.scm.checkout(branch_1)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
ignored = self.read_ignored()
self.assertIn("/" + fname_branch, ignored)
class TestCheckoutMissingMd5InStageFile(TestRepro):
def test(self):
d = load_yaml(self.file1_stage)
del d[Stage.PARAM_OUTS][0][LocalTree.PARAM_CHECKSUM]
del d[Stage.PARAM_DEPS][0][LocalTree.PARAM_CHECKSUM]
dump_yaml(self.file1_stage, d)
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
class TestCheckoutEmptyDir(TestDvc):
def test(self):
dname = "empty_dir"
os.mkdir(dname)
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertEqual(len(stage.outs), 1)
stage.outs[0].remove()
self.assertFalse(os.path.exists(dname))
stats = self.dvc.checkout(force=True)
assert stats["added"] == [dname + os.sep]
self.assertTrue(os.path.isdir(dname))
self.assertEqual(len(os.listdir(dname)), 0)
class TestCheckoutNotCachedFile(TestDvc):
def test(self):
cmd = "python {} {} {}".format(self.CODE, self.FOO, "out")
self.dvc.add(self.FOO)
stage = self.dvc.run(
cmd=cmd,
deps=[self.FOO, self.CODE],
outs_no_cache=["out"],
single_stage=True,
)
self.assertTrue(stage is not None)
stats = self.dvc.checkout(force=True)
assert not any(stats.values())
class TestCheckoutWithDeps(TestRepro):
def test(self):
os.unlink(self.FOO)
os.unlink(self.file1)
self.assertFalse(os.path.exists(self.FOO))
self.assertFalse(os.path.exists(self.file1))
ret = main(["checkout", "--force", self.file1_stage, "--with-deps"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.exists(self.FOO))
self.assertTrue(os.path.exists(self.file1))
class TestCheckoutDirectory(TestRepro):
def test(self):
stage = self.dvc.add(self.DATA_DIR)[0]
shutil.rmtree(self.DATA_DIR)
self.assertFalse(os.path.exists(self.DATA_DIR))
ret = main(["checkout", stage.path])
self.assertEqual(ret, 0)
self.assertTrue(os.path.exists(self.DATA_DIR))
class TestCheckoutHook(TestDvc):
@patch("sys.stdout.isatty", return_value=True)
@patch("dvc.prompt.input", side_effect=EOFError)
def test(self, _mock_input, _mock_isatty):
""" Test that dvc checkout handles EOFError gracefully, which is what
it will experience when running in a git hook.
"""
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertNotEqual(stage, None)
self.create(os.path.join(self.DATA_DIR, "test"), "test")
with self.assertRaises(ConfirmRemoveError):
self.dvc.checkout()
class TestCheckoutSuggestGit(TestRepro):
def test(self):
# pylint: disable=no-member
try:
self.dvc.checkout(targets="gitbranch")
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, NoOutputOrStageError)
self.assertIsInstance(
exc.__cause__.__cause__, StageFileDoesNotExistError
)
try:
self.dvc.checkout(targets=self.FOO)
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, NoOutputOrStageError)
self.assertIsNone(exc.__cause__.__cause__)
try:
self.dvc.checkout(targets="looks-like-dvcfile.dvc")
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, StageFileDoesNotExistError)
self.assertIsNone(exc.__cause__.__cause__)
class TestCheckoutTargetRecursiveShouldNotRemoveOtherUsedFiles(TestDvc):
def test(self):
ret = main(["add", self.DATA_DIR, self.FOO, self.BAR])
self.assertEqual(0, ret)
ret = main(["checkout", "-R", self.DATA_DIR])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists(self.FOO))
self.assertTrue(os.path.exists(self.BAR))
class TestCheckoutRecursiveNotDirectory(TestDvc):
def test(self):
ret = main(["add", self.FOO])
self.assertEqual(0, ret)
stats = self.dvc.checkout(targets=[self.FOO + ".dvc"], recursive=True)
assert stats == {"added": [], "modified": [], "deleted": []}
class TestCheckoutMovedCacheDirWithSymlinks(TestDvc):
def test(self):
ret = main(["config", "cache.type", "symlink"])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
self.assertTrue(System.is_symlink(self.FOO))
old_foo_link = os.path.realpath(self.FOO)
self.assertTrue(System.is_symlink(self.DATA))
old_data_link = os.path.realpath(self.DATA)
old_cache_dir = self.dvc.cache.local.cache_dir
new_cache_dir = old_cache_dir + "_new"
os.rename(old_cache_dir, new_cache_dir)
ret = main(["cache", "dir", new_cache_dir])
self.assertEqual(ret, 0)
ret = main(["checkout", "-f"])
self.assertEqual(ret, 0)
self.assertTrue(System.is_symlink(self.FOO))
new_foo_link = os.path.realpath(self.FOO)
self.assertTrue(System.is_symlink(self.DATA))
new_data_link = os.path.realpath(self.DATA)
self.assertEqual(
relpath(old_foo_link, old_cache_dir),
relpath(new_foo_link, new_cache_dir),
)
self.assertEqual(
relpath(old_data_link, old_cache_dir),
relpath(new_data_link, new_cache_dir),
)
def test_checkout_no_checksum(tmp_dir, dvc):
tmp_dir.gen("file", "file content")
stage = dvc.run(
outs=["file"], no_exec=True, cmd="somecmd", single_stage=True
)
with pytest.raises(CheckoutError):
dvc.checkout([stage.path], force=True)
assert not os.path.exists("file")
@pytest.mark.parametrize(
"link, link_test_func",
[("hardlink", System.is_hardlink), ("symlink", System.is_symlink)],
)
def test_checkout_relink(tmp_dir, dvc, link, link_test_func):
dvc.cache.local.cache_types = [link]
tmp_dir.dvc_gen({"dir": {"data": "text"}})
dvc.unprotect("dir/data")
assert not link_test_func("dir/data")
stats = dvc.checkout(["dir.dvc"], relink=True)
assert stats == empty_checkout
assert link_test_func("dir/data")
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_checkout_relink_protected(tmp_dir, dvc, link):
dvc.cache.local.cache_types = [link]
tmp_dir.dvc_gen("foo", "foo")
dvc.unprotect("foo")
assert os.access("foo", os.W_OK)
stats = dvc.checkout(["foo.dvc"], relink=True)
assert stats == empty_checkout
# NOTE: Windows symlink perms don't propagate to the target
if link == "copy" or (link == "symlink" and os.name == "nt"):
assert os.access("foo", os.W_OK)
else:
assert not os.access("foo", os.W_OK)
@pytest.mark.parametrize(
"target",
[os.path.join("dir", "subdir"), os.path.join("dir", "subdir", "file")],
)
def test_partial_checkout(tmp_dir, dvc, target):
tmp_dir.dvc_gen({"dir": {"subdir": {"file": "file"}, "other": "other"}})
shutil.rmtree("dir")
stats = dvc.checkout([target])
assert stats["added"] == ["dir" + os.sep]
assert list(walk_files("dir")) == [os.path.join("dir", "subdir", "file")]
empty_checkout = {"added": [], "deleted": [], "modified": []}
def test_stats_on_empty_checkout(tmp_dir, dvc, scm):
assert dvc.checkout() == empty_checkout
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
assert dvc.checkout() == empty_checkout
def test_stats_on_checkout(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{
"dir": {"subdir": {"file": "file"}, "other": "other"},
"foo": "foo",
"bar": "bar",
},
commit="initial",
)
scm.checkout("HEAD~")
stats = dvc.checkout()
assert set(stats["deleted"]) == {"dir" + os.sep, "foo", "bar"}
scm.checkout("-")
stats = dvc.checkout()
assert set(stats["added"]) == {"bar", "dir" + os.sep, "foo"}
tmp_dir.gen({"lorem": "lorem", "bar": "new bar", "dir2": {"file": "file"}})
(tmp_dir / "foo").unlink()
scm.repo.git.rm("foo.dvc")
tmp_dir.dvc_add(["bar", "lorem", "dir2"], commit="second")
scm.checkout("HEAD~")
stats = dvc.checkout()
assert set(stats["modified"]) == {"bar"}
assert set(stats["added"]) == {"foo"}
assert set(stats["deleted"]) == {"lorem", "dir2" + os.sep}
scm.checkout("-")
stats = dvc.checkout()
assert set(stats["modified"]) == {"bar"}
assert set(stats["added"]) == {"dir2" + os.sep, "lorem"}
assert set(stats["deleted"]) == {"foo"}
def test_checkout_stats_on_failure(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"foo": "foo", "dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
stage = Dvcfile(dvc, "foo.dvc").stage
tmp_dir.dvc_gen({"foo": "foobar", "other": "other other"}, commit="second")
# corrupt cache
cache = stage.outs[0].cache_path
os.chmod(cache, 0o644)
with open(cache, "a") as fd:
fd.write("destroy cache")
scm.checkout("HEAD~")
with pytest.raises(CheckoutError) as exc:
dvc.checkout(force=True)
assert exc.value.stats == {
**empty_checkout,
"failed": ["foo"],
"modified": ["other"],
}
def test_stats_on_added_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
tmp_dir.gen("dir/subdir/newfile", "newfile")
tmp_dir.dvc_add("dir", commit="add newfile")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_updated_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
tmp_dir.gen("dir/subdir/file", "what file?")
tmp_dir.dvc_add("dir", commit="update file")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_removed_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
(tmp_dir / "dir" / "subdir" / "file").unlink()
tmp_dir.dvc_add("dir", commit="removed file from subdir")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_show_changes_does_not_show_summary(
tmp_dir, dvc, scm, caplog
):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
scm.checkout("HEAD~")
with caplog.at_level(logging.INFO, logger="dvc"):
caplog.clear()
assert main(["checkout"]) == 0
for out in ["D\tdir" + os.sep, "D\tother"]:
assert out in caplog.text
assert "modified" not in caplog.text
assert "deleted" not in caplog.text
assert "added" not in caplog.text
def test_stats_does_not_show_changes_by_default(tmp_dir, dvc, scm, caplog):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
scm.checkout("HEAD~")
with caplog.at_level(logging.INFO, logger="dvc"):
caplog.clear()
assert main(["checkout", "--summary"]) == 0
assert "2 files deleted" in caplog.text
assert "dir" not in caplog.text
assert "other" not in caplog.text
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_checkout_with_relink_existing(tmp_dir, dvc, link):
tmp_dir.dvc_gen("foo", "foo")
(tmp_dir / "foo").unlink()
tmp_dir.dvc_gen("bar", "bar")
dvc.cache.local.cache_types = [link]
stats = dvc.checkout(relink=True)
assert stats == {**empty_checkout, "added": ["foo"]}
def test_checkout_with_deps(tmp_dir, dvc):
tmp_dir.dvc_gen({"foo": "foo"})
dvc.run(
fname="copy_file.dvc",
cmd="echo foo > bar",
outs=["bar"],
deps=["foo"],
single_stage=True,
)
(tmp_dir / "bar").unlink()
(tmp_dir / "foo").unlink()
stats = dvc.checkout(["copy_file.dvc"], with_deps=False)
assert stats == {**empty_checkout, "added": ["bar"]}
(tmp_dir / "bar").unlink()
stats = dvc.checkout(["copy_file.dvc"], with_deps=True)
assert set(stats["added"]) == {"foo", "bar"}
def test_checkout_recursive(tmp_dir, dvc):
tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
dvc.add("dir", recursive=True)
(tmp_dir / "dir" / "foo").unlink()
(tmp_dir / "dir" / "bar").unlink()
stats = dvc.checkout(["dir"], recursive=True)
assert set(stats["added"]) == {
os.path.join("dir", "foo"),
os.path.join("dir", "bar"),
}
@pytest.mark.skipif(
not S3.should_test(), reason="Only run with S3 credentials"
)
def test_checkout_for_external_outputs(tmp_dir, dvc):
dvc.cache.s3 = CloudCache(S3Tree(dvc, {"url": S3.get_url()}))
remote = Remote(S3Tree(dvc, {"url": S3.get_url()}))
file_path = remote.tree.path_info / "foo"
remote.tree.s3.put_object(
Bucket=remote.tree.path_info.bucket, Key=file_path.path, Body="foo"
)
dvc.add(str(remote.tree.path_info / "foo"), external=True)
remote.tree.remove(file_path)
stats = dvc.checkout(force=True)
assert stats == {**empty_checkout, "added": [str(file_path)]}
assert remote.tree.exists(file_path)
remote.tree.s3.put_object(
Bucket=remote.tree.path_info.bucket,
Key=file_path.path,
Body="foo\nfoo",
)
stats = dvc.checkout(force=True)
assert stats == {**empty_checkout, "modified": [str(file_path)]}
def test_checkouts_with_different_addressing(tmp_dir, dvc, run_copy):
tmp_dir.gen({"foo": "foo", "lorem": "lorem"})
run_copy("foo", "bar", name="copy-foo-bar")
run_copy("lorem", "ipsum", name="copy-lorem-ipsum")
(tmp_dir / "bar").unlink()
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout(PIPELINE_FILE)["added"]) == {"bar", "ipsum"}
(tmp_dir / "bar").unlink()
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout(":")["added"]) == {"bar", "ipsum"}
(tmp_dir / "bar").unlink()
assert dvc.checkout("copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert dvc.checkout("dvc.yaml:copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert dvc.checkout(":copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
(tmp_dir / "data").mkdir()
with (tmp_dir / "data").chdir():
assert dvc.checkout(relpath(tmp_dir / "dvc.yaml") + ":copy-foo-bar")[
"added"
] == [relpath(tmp_dir / "bar")]
(tmp_dir / "bar").unlink()
assert dvc.checkout("bar")["added"] == ["bar"]
def test_checkouts_on_same_stage_name_and_output_name(tmp_dir, dvc, run_copy):
tmp_dir.gen("foo", "foo")
run_copy("foo", "bar", name="copy-foo-bar")
run_copy("foo", "copy-foo-bar", name="make_collision")
(tmp_dir / "bar").unlink()
(tmp_dir / "copy-foo-bar").unlink()
assert dvc.checkout("copy-foo-bar")["added"] == ["bar"]
assert dvc.checkout("./copy-foo-bar")["added"] == ["copy-foo-bar"]
def test_checkouts_for_pipeline_tracked_outs(tmp_dir, dvc, scm, run_copy):
tmp_dir.gen("foo", "foo")
stage1 = run_copy("foo", "bar", name="copy-foo-bar")
tmp_dir.gen("lorem", "lorem")
stage2 = run_copy("lorem", "ipsum", name="copy-lorem-ipsum")
for out in ["bar", "ipsum"]:
(tmp_dir / out).unlink()
assert dvc.checkout(["bar"])["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert set(dvc.checkout([PIPELINE_FILE])["added"]) == {"bar", "ipsum"}
for out in ["bar", "ipsum"]:
(tmp_dir / out).unlink()
assert set(dvc.checkout([stage1.addressing])["added"]) == {"bar"}
(tmp_dir / "bar").unlink()
assert set(dvc.checkout([stage2.addressing])["added"]) == {"ipsum"}
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout()["added"]) == {"bar", "ipsum"}
|
the-stack_106_29319 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.ops import gen_tpu_ops
from tensorflow.contrib.tpu.proto import tpu_embedding_configuration_pb2 as elc
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
class TableConfig(
collections.namedtuple(
'TableConfig',
['vocabulary_size', 'dimension', 'initializer', 'combiner'])):
"""Embedding table configuration."""
@experimental
def __new__(cls,
vocabulary_size,
dimension,
initializer=None,
combiner='mean'):
"""Embedding table configuration.
Args:
vocabulary_size: Number of vocabulary entries (i.e. rows) in the table.
dimension: The embedding dimension.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
with bag-of-words columns. For more information, see
`tf.nn.embedding_lookup_sparse`.
Returns:
`TableConfig`.
Raises:
ValueError: if `vocabulary_size` is not positive integer.
ValueError: if `dimension` is not positive integer.
ValueError: if `initializer` is specified and is not callable.
ValueError: if `combiner` is not supported.
"""
if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
if not isinstance(dimension, int) or dimension < 1:
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if combiner not in ('mean', 'sum', 'sqrtn'):
raise ValueError('Invalid combiner {}'.format(combiner))
return super(TableConfig, cls).__new__(cls, vocabulary_size, dimension,
initializer, combiner)
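# A minimal usage sketch (hypothetical table names and sizes, not part of this
# module):
#
#   video_table = TableConfig(vocabulary_size=50000, dimension=32)
#   user_table = TableConfig(vocabulary_size=10000, dimension=16,
#                            combiner='sum')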
# TODO(shizhiw): Factor `use_gradient_accumulation` and
# `pipeline_execution_with_tensor_core` out of `_OptimizationParameters`.
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation,
pipeline_execution_with_tensor_core):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
self.pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
class AdagradParameters(_OptimizationParameters):
"""Optimization parameters for Adagrad."""
def __init__(self, learning_rate, initial_accumulator,
use_gradient_accumulation=False,
pipeline_execution_with_tensor_core=True):
"""Optimization parameters for Adagrad.
Args:
learning_rate: used for updating embedding table.
initial_accumulator: initial accumulator for Adagrad.
use_gradient_accumulation: setting this to `True` makes embedding
gradients calculation more accurate but slower. Please see
`optimization_parameters.proto` for details.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding ID. Please see
`tpu_embedding_configuration.proto` for details.
"""
super(AdagradParameters, self).__init__(learning_rate,
use_gradient_accumulation,
pipeline_execution_with_tensor_core)
self.initial_accumulator = initial_accumulator
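# Usage sketch (hypothetical hyperparameters):
#
#   adagrad = AdagradParameters(learning_rate=0.1, initial_accumulator=0.1)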
class AdamParameters(_OptimizationParameters):
"""Optimization parameters for Adam."""
def __init__(self, learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08,
lazy_adam=True,
sum_inside_sqrt=True,
use_gradient_accumulation=False,
pipeline_execution_with_tensor_core=True):
"""Optimization parameters for Adam.
Args:
learning_rate: a floating point value. The learning rate.
beta1: A float value.
The exponential decay rate for the 1st moment estimates.
beta2: A float value.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
Please see `optimization_parameters.proto` for details.
sum_inside_sqrt: This improves training speed. Please see
`optimization_parameters.proto` for details.
use_gradient_accumulation: setting this to `True` makes embedding
gradients calculation more accurate but slower. Please see
`optimization_parameters.proto` for details.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding ID. Please see
`tpu_embedding_configuration.proto` for details.
"""
super(AdamParameters, self).__init__(learning_rate,
use_gradient_accumulation,
pipeline_execution_with_tensor_core)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_adam = lazy_adam
self.sum_inside_sqrt = sum_inside_sqrt
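# Usage sketch (hypothetical learning rate; the remaining arguments keep the
# defaults documented above):
#
#   adam = AdamParameters(learning_rate=0.001)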
class StochasticGradientDescentParameters(_OptimizationParameters):
"""Optimization parameters for stochastic gradient descent.
Args:
learning_rate: a floating point value. The learning rate.
use_gradient_accumulation: setting this to `True` makes embedding
gradients calculation more accurate but slower. Please see
`optimization_parameters.proto` for details.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding ID. Please see
`tpu_embedding_configuration.proto` for details.
"""
def __init__(self, learning_rate, use_gradient_accumulation=False,
pipeline_execution_with_tensor_core=True):
super(StochasticGradientDescentParameters, self).__init__(
learning_rate, use_gradient_accumulation,
pipeline_execution_with_tensor_core)
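# Usage sketch (hypothetical learning rate); this is also the optimizer that
# inference mode falls back to internally:
#
#   sgd = StochasticGradientDescentParameters(learning_rate=1.0)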
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
table_config_video = tpu_embedding.TableConfig(
vocabulary_size=8, dimension=2,
initializer=initializer, combiner='mean')
table_config_user = tpu_embedding.TableConfig(
vocabulary_size=4, dimension=2,
initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_table_dict = {'watched': 'video',
'favorited': 'video',
'friends': 'user'}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_table_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
# `train_op` and `send_gradients_op` must happen in order.
with ops.control_dependencies([train_op]):
send_gradients_op = embedding.generate_send_gradients_op()
with ops.control_dependencies([send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding.init_ops)
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
"""
# TODO(shizhiw): Instead of `feature_to_table_dict` which maps to table
# name, consider `feature_to_config_dict` which maps to `FeatureConfig`.
# `FeatureConfig` could have fields other than table name. For example, it
# could have a field to indicate that the feature should not be used to
# update embedding table (cr/204852758, cr/204940540). Also, this can support
# different combiners for different features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_table_dict` lists of `TableSpec` and `FeatureSpec` respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
@experimental
def __init__(self,
table_to_config_dict,
feature_to_table_dict,
batch_size,
mode,
master,
optimization_parameters=None):
"""API for using TPU for embedding lookups.
Args:
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`. Table refers to an embedding table, e.g. `params`
argument to `tf.nn.embedding_lookup_sparse()`.
feature_to_table_dict: A dictionary mapping from string of feature name
to string of table name. Feature refers to ids to lookup in embedding
table, e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
batch_size: An `int` representing the global batch size.
mode: `TRAINING` or `INFERENCE`.
master: A `string` representing the TensorFlow master to use.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
`StochasticGradientDescentParameters`. Must be set in training and must
be `None` in inference.
Raises:
ValueError: if any input is invalid.
"""
_validate_table_to_config_dict(table_to_config_dict)
# Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
self._combiners = _create_combiners(self._table_to_config_dict)
_validate_feature_to_table_dict(table_to_config_dict, feature_to_table_dict)
self._feature_to_table_dict = _create_ordered_dict(feature_to_table_dict)
self._table_to_features_dict = _create_table_to_features_dict(
self._feature_to_table_dict)
self._batch_size = batch_size
self._master = master
self._tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(self._master)) # pylint: disable=protected-access
if self._tpu_system_metadata.num_cores == 0:
raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
'TPUs.'.format(self._master))
self._num_hosts = self._tpu_system_metadata.num_hosts
self._hosts = [device.name for device in self._tpu_system_metadata.devices
if 'device:CPU:' in device.name]
self._num_cores_per_host = self._tpu_system_metadata.num_of_cores_per_host
self._num_cores = self._tpu_system_metadata.num_cores
_validate_batch_size(self._batch_size, self._num_cores)
self._batch_size_per_core = self._batch_size // self._num_cores
self._init_ops = []
# TODO(shizhiw): remove `mode`?
if mode == TRAINING:
_validate_optimization_parameters(optimization_parameters)
self._optimization_parameters = optimization_parameters
elif mode == INFERENCE:
if optimization_parameters is not None:
raise ValueError('`optimization_parameters` should be `None` '
'for inference mode.')
self._optimization_parameters = (
StochasticGradientDescentParameters(1.))
else:
raise ValueError('`mode` only supports {} and {}; got {}.'
.format(TRAINING, INFERENCE, mode))
self._mode = mode
# TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
# and create special handler for inference that inherits from
# StochasticGradientDescentHandler with more user-friendly error message
# on get_slot().
self._optimizer_handler = _get_optimization_handler(
self._optimization_parameters)
dummy_table_variables_init_op = self._create_dummy_table_variables()
self._init_ops.append(dummy_table_variables_init_op)
self._config_proto = self._create_config_proto()
self._create_variables_and_ops()
self._init_ops.extend(self._load_parameters_ops)
@property
def hosts(self):
"""A list of device names for CPU hosts.
Returns:
A list of device names for CPU hosts.
"""
return copy.copy(self._hosts)
# TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
# to be consistent with `tpu_embedding_configuration.proto`.
@property
def num_cores_per_host(self):
"""Number of TPU cores on a CPU host.
Returns:
Number of TPU cores on a CPU host.
"""
return self._num_cores_per_host
@property
def num_cores(self):
"""Total number of TPU cores on all hosts.
Returns:
Total number of TPU cores on all hosts.
"""
return self._num_cores
@property
def batch_size_per_core(self):
"""Batch size for each TPU core.
The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
must have batch dimension equal to this.
Returns:
Batch size for each TPU core.
"""
return self._batch_size_per_core
@property
def config_proto(self):
"""Create embedding config proto for `tpu.initialize_system()`.
Returns:
a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables, which
is passed to `tpu.initialize_system()`.
"""
return self._config_proto
@property
def init_ops(self):
"""Initialization ops for TPU embedding.
It must be called after all global variables have been initialized,
i.e. after `global_variables_initializer()`, as it loads embedding
tables into TPU.
Returns:
A list of ops.
"""
return self._init_ops
# TODO(shizhiw): get table variables the same way as getting slot variables.
@property
def table_to_table_variables_dict(self):
return copy.copy(self._table_to_table_variables_dict)
@property
def feature_to_table_dict(self):
return copy.copy(self._feature_to_table_dict)
def get_slot_names(self):
"""Return a list of the names of slots created by `TPUEmbedding`."""
return self._optimizer_handler.get_slot_names()
def get_slot(self, table, name):
"""Return a slot named `name` create for `table` by `TPUEmbedding`."""
return self._optimizer_handler.get_slot(table, name)
# TODO(shizhiw): expose load to user too?
@property
def retrieve_parameters_ops(self):
return self._retrieve_parameters_ops
def _create_config_proto(self):
"""Create `TPUEmbeddingConfiguration`."""
config_proto = elc.TPUEmbeddingConfiguration()
for table in self._table_to_config_dict:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table
table_config = self._table_to_config_dict[table]
table_descriptor.vocabulary_size = table_config.vocabulary_size
table_descriptor.dimension = table_config.dimension
features_for_table = self._table_to_features_dict[table]
table_descriptor.num_features = len(features_for_table)
table_descriptor.optimization_parameters.learning_rate.constant = (
self._optimization_parameters.learning_rate)
table_descriptor.optimization_parameters.use_gradient_accumulation = (
self._optimization_parameters.use_gradient_accumulation)
self._optimizer_handler.set_optimization_parameters(table_descriptor)
config_proto.mode = self._mode
config_proto.batch_size_per_tensor_core = self._batch_size_per_core
config_proto.num_hosts = self._num_hosts
config_proto.num_tensor_cores = self._num_cores
config_proto.sharding_strategy = elc.TPUEmbeddingConfiguration.DIV_DEFAULT
config_proto.pipeline_execution_with_tensor_core = (
self._optimization_parameters.pipeline_execution_with_tensor_core)
return config_proto
def _create_variables_and_ops(self):
"""Create embedding variables and return ops to load them into TPU."""
self._load_parameters_ops = []
self._retrieve_parameters_ops = []
self._table_to_table_variables_dict = {}
for table in self._table_to_config_dict:
device_fn = _create_device_fn(self._hosts)
with ops.device(device_fn):
# TODO(shizhiw): allow user to specify variable name so that
# they could make the name consistent with CPU etc.
variable_name = table
table_variables = _create_partitioned_variables(
name=variable_name,
num_hosts=self._num_hosts,
vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
embedding_dimension=self._table_to_config_dict[table].dimension,
initializer=self._table_to_config_dict[table].initializer,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
self._table_to_table_variables_dict[table] = table_variables
self._optimizer_handler.create_variables_and_ops(
table, variable_name, self._num_hosts,
self._table_to_config_dict[table], table_variables,
self._load_parameters_ops, self._retrieve_parameters_ops)
def _create_dummy_table_variables(self):
"""Create dummy embedding table variables.
The sole purpose of these dummy variables is to trigger gradient
calculation wrt them so that the gradients wrt activations can be captured
and later sent to TPU embedding.
Returns:
Initializer for these variables.
Raises:
RuntimeError: if collection to store gradients already exists and is not
empty.
"""
self._dummy_table_variables = []
# TODO(shizhiw): remove table id.
for table_id, table in enumerate(self._table_to_features_dict):
self._dummy_table_variables.append(
variable_scope.get_variable(
'tpu_embedding_dummy_table_variable_%s' % table,
dtype=dtypes.float32,
shape=[1],
use_resource=True,
trainable=True,
# TODO(shizhiw): Remove these dummy variables as
# tensorflow optimizer creates slot variable for them which
# is undesirable.
# e.g. tpu_embedding_dummy_table_variable_mlp_user/Adam{_1}.
# Explicitly specifying collections prevents this variable from
# being added to the GLOBAL_VARIABLES collection, so that Saver()
# ignores it.
collections=['tpu_embedding_dummy_table_variables']))
g = ops.get_default_graph()
table_gradients = g.get_collection_ref(
'tpu_embedding_gradients_table_%d' % table_id)
if table_gradients:
raise RuntimeError(
'tpu_embedding_gradients_table_%d is not empty.' % table_id)
table_gradients.extend([None] * len(self._table_to_features_dict[table]))
return variables.variables_initializer(
self._dummy_table_variables,
name='tpu_embedding_dummy_table_variables_init')
def generate_enqueue_ops(self, sparse_features_list):
"""Generate enqueue ops.
Args:
sparse_features_list: a list of dictionary mapping from string
of feature names to sparse tensor. Each dictionary is for one
TPU core. Dictionaries for the same core should be contiguous
on the list.
Returns:
Ops to enqueue to TPU for embedding.
"""
self._validate_generate_enqueue_ops_sparse_features_list(
sparse_features_list)
return [
self._generate_enqueue_op(
sparse_features, device_ordinal=i % self._num_cores_per_host)
for i, sparse_features in enumerate(sparse_features_list)
]
def _validate_generate_enqueue_ops_sparse_features_list(
self, sparse_features_list):
"""Validate `sparse_features_list`."""
if len(sparse_features_list) != self._num_cores:
raise ValueError('Length of `sparse_features_list` should match the '
'number of cores; '
'`len(sparse_features_list)` is {}, '
'number of cores is {}.'.format(
len(sparse_features_list), self._num_cores))
feature_set = set(self._feature_to_table_dict.keys())
contiguous_device = None
for i, sparse_features in enumerate(sparse_features_list):
used_feature_set = set(sparse_features.keys())
# Check features are valid.
missing_feature_set = feature_set - used_feature_set
if missing_feature_set:
raise ValueError('`sparse_features_list[{}]` misses a feature that is '
'in `feature_to_config_dict`: {}.'.format(
i, missing_feature_set))
extra_feature_set = used_feature_set - feature_set
if extra_feature_set:
raise ValueError('`sparse_features_list[{}]` has a feature that is not '
'in `feature_to_config_dict`: {}.'.format(
i, extra_feature_set))
device = None
device_feature = None
for feature, tensor in six.iteritems(sparse_features):
if not isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('`sparse_features_list[{}]` has a feature that is '
'not mapped to `SparseTensor`. '
'`feature`: {}, type: {}'.format(
i, feature, type(tensor)))
# Check all features are on the same device.
if device is None:
device = tensor.op.device
device_feature = feature
else:
if device != tensor.op.device:
raise ValueError('Devices are different between features in '
'`sparse_features_list[{}]`; '
'devices: {}, {}; features: {}, {}.'.format(
i, device, tensor.op.device, feature,
device_feature))
if i % self._num_cores_per_host:
if device != contiguous_device:
raise ValueError('We expect the `sparse_features` which are on the '
'same host to be contiguous in '
'`sparse_features_list`, '
'`sparse_features_list[{}]` is on device {}, '
'but is expected to be on device {}.'.format(
i, device, contiguous_device))
else:
contiguous_device = device
def _generate_enqueue_op(self, sparse_features, device_ordinal):
with ops.colocate_with(list(sparse_features.values())[0]):
sample_idcs, embedding_idcs, aggregation_weights = (
self._format_for_tpu_embedding_sparse_batch(sparse_features))
return tpu_ops.enqueue_tpu_embedding_sparse_batch(
sample_idcs,
embedding_idcs,
aggregation_weights,
combiners=self._combiners,
device_ordinal=device_ordinal)
def _format_for_tpu_embedding_sparse_batch(self, sparse_features):
"""Format sparse features for `enqueue_tpu_embedding_sparse_batch()`.
Args:
sparse_features: a `Dict` of `SparseTensor`s for embedding.
Returns:
Arguments for `enqueue_tpu_embedding_sparse_batch()`.
"""
sample_idcs, embedding_idcs, aggregation_weights = list(), list(), list()
for table in self._table_to_features_dict:
sample_t, indices_t, weights_t = list(), list(), list()
features = self._table_to_features_dict[table]
for i, feature in enumerate(features):
tensor = sparse_features[feature]
sample_indices = tensor.indices[:, 0]
embedding_indices = tensor.values
weights = array_ops.ones_like(embedding_indices)
sample_t.append(i * self._batch_size_per_core + sample_indices)
indices_t.append(embedding_indices)
weights_t.append(weights)
sample_idcs.append(
math_ops.cast(array_ops.concat(sample_t, axis=0), dtype=dtypes.int32))
embedding_idcs.append(
math_ops.cast(
array_ops.concat(indices_t, axis=0), dtype=dtypes.int32))
aggregation_weights.append(
math_ops.cast(
array_ops.concat(weights_t, axis=0), dtype=dtypes.float32))
return sample_idcs, embedding_idcs, aggregation_weights
def get_activations(self):
"""Get activations for features.
This should be called within `computation` that is passed to
`tpu.replicate` and friends.
Returns:
A dictionary mapping from `String` of feature name to `Tensor`
of activation.
"""
recv_activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_to_config_dict),
config=self._config_proto.SerializeToString())
activations = collections.OrderedDict()
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for lookup_id, feature in enumerate(features):
start_row = lookup_id * self._batch_size_per_core
end_row = start_row + self._batch_size_per_core
activations[feature] = gen_tpu_ops.tpu_embedding_activations(
self._dummy_table_variables[table_id],
recv_activations[table_id][start_row:end_row, :],
table_id=table_id,
lookup_id=lookup_id)
return activations
# TODO(shizhiw): Make `gradient_multiplier` per feature. Setting it to 0 would
# have the effect of `tf.stop_gradients()`.
# TODO(shizhiw): Consider alternative ways to capture gradients wrt embedding
# layer outputs to remove `_dummy_table_variables`,
# `_embedding_activation_grad` and `tpu_embedding_gradients_table_%d'.
def generate_send_gradients_op(self, gradient_multipliers=None):
"""Retrieve gradients from collections and send them to TPU embedding.
Args:
gradient_multipliers: None, or dict mapping table names to gradient
multiplier Tensors.
Returns:
SendTPUEmbeddingGradients Op.
Raises:
ValueError: If required gradients have not been defined.
RuntimeError: If `mode` is not `TRAINING`.
"""
if self._mode != TRAINING:
raise RuntimeError('Only in training mode gradients need to '
'be sent to TPU embedding; got mode {}.'
.format(self._mode))
g = ops.get_default_graph()
gradients = list()
for table_id, table in enumerate(self._table_to_config_dict):
table_gradients = g.get_collection(
'tpu_embedding_gradients_table_%d' % table_id)
if any(gradient is None for gradient in table_gradients):
raise ValueError(
'Table {}/{} has undefined gradients: this is probably because the '
'model asked TPUEmbedding to compute activations that were not '
'used.'.format(table_id, table))
concat_table_grads = array_ops.concat(table_gradients, axis=0)
if gradient_multipliers is not None:
# `table` is already the table-name string used as the dict key.
concat_table_grads *= gradient_multipliers[table]
gradients.append(concat_table_grads)
return tpu_ops.send_tpu_embedding_gradients(
inputs=gradients, config=self.config_proto.SerializeToString())
def _validate_table_to_config_dict(table_to_config_dict):
"""Validate `table_to_config_dict`."""
for k, v in six.iteritems(table_to_config_dict):
if not isinstance(v, TableConfig):
raise ValueError('Value of `table_to_config_dict` must be of type '
'`TableConfig`, got {} for {}.'.format(type(v), k))
def _validate_feature_to_table_dict(table_to_config_dict,
feature_to_table_dict):
"""Validate `feature_to_table_dict`."""
used_table_set = set(feature_to_table_dict.values())
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies table that is not '
'used in `feature_to_table_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_table_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
raise ValueError('`batch_size` is not a multiple of number of '
'cores. `batch_size`={}, `_num_cores`={}.'.format(
batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters):
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
'`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def create_variables_and_ops(self, table, variable_name):
raise NotImplementedError()
def get_slot_names(self):
raise NotImplementedError()
def get_slot(self, table, name):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
"""Handles Adagrad specific logic."""
def __init__(self, optimization_parameters):
super(_AdagradHandler, self).__init__(optimization_parameters)
self._table_to_accumulator_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adagrad.SetInParent()
def create_variables_and_ops(self, table, variable_name, num_hosts,
table_config, table_variables,
load_parameters_ops, retrieve_parameters_ops):
optimizer_name = 'Adagrad'
accumulator_initializer = init_ops.constant_initializer(
self._optimization_parameters.initial_accumulator)
accumulator_variables = _create_partitioned_variables(
name='%s/%s' % (variable_name, optimizer_name),
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=accumulator_initializer)
self._table_to_accumulator_variables_dict[table] = accumulator_variables
for host_id, table_variable, accumulator_variable in (zip(
range(num_hosts), table_variables, accumulator_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adagrad_parameters(
parameters=table_variable,
accumulators=accumulator_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieved_table, retrieved_accumulator = (
tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(accumulator_variable, retrieved_accumulator))
load_parameters_ops.append(load_parameters_op)
retrieve_parameters_ops.append(retrieve_parameters_op)
def get_slot_names(self):
return ['accumulator']
def get_slot(self, table, name):
if name not in self.get_slot_names():
raise ValueError('Adagrad has {} as slot names; got {}.'
.format(self.get_slot_names(), name))
return self._table_to_accumulator_variables_dict[table]
class _AdamHandler(_OptimizerHandler):
"""Handles Adam specific logic."""
def __init__(self, optimization_parameters):
super(_AdamHandler, self).__init__(optimization_parameters)
self._table_to_m_variables_dict = {}
self._table_to_v_variables_dict = {}
def set_optimization_parameters(self, table_descriptor):
table_descriptor.optimization_parameters.adam.beta1 = (
self._optimization_parameters.beta1)
table_descriptor.optimization_parameters.adam.beta2 = (
self._optimization_parameters.beta2)
table_descriptor.optimization_parameters.adam.epsilon = (
self._optimization_parameters.epsilon)
table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
not self._optimization_parameters.lazy_adam)
table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
self._optimization_parameters.sum_inside_sqrt)
def create_variables_and_ops(self, table, variable_name, num_hosts,
table_config, table_variables,
load_parameters_ops, retrieve_parameters_ops):
optimizer_name = 'Adam'
m_initializer = init_ops.zeros_initializer()
m_variables = _create_partitioned_variables(
name='%s/%s/m' % (variable_name, optimizer_name),
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=m_initializer)
v_initializer = init_ops.zeros_initializer()
v_variables = _create_partitioned_variables(
name='%s/%s/v' % (variable_name, optimizer_name),
num_hosts=num_hosts,
vocabulary_size=table_config.vocabulary_size,
embedding_dimension=table_config.dimension,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
initializer=v_initializer)
self._table_to_m_variables_dict[table] = m_variables
self._table_to_v_variables_dict[table] = v_variables
for host_id, table_variable, m_variable, v_variable in (zip(
range(num_hosts), table_variables,
m_variables, v_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops.load_tpu_embedding_adam_parameters(
parameters=table_variable,
momenta=m_variable,
velocities=v_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieved_table, retrieved_m, retrieved_v = (
tpu_ops.retrieve_tpu_embedding_adam_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table),
state_ops.assign(m_variable, retrieved_m),
state_ops.assign(v_variable, retrieved_v))
load_parameters_ops.append(load_parameters_op)
retrieve_parameters_ops.append(retrieve_parameters_op)
def get_slot_names(self):
return ['m', 'v']
def get_slot(self, table, name):
if name == 'm':
return self._table_to_m_variables_dict[table]
elif name == 'v':
return self._table_to_v_variables_dict[table]
else:
raise ValueError('Adam has {} as slot names; got {}.'
.format(self.get_slot_names(), name))
class _StochasticGradientDescentHandler(_OptimizerHandler):
"""Handles stochastic gradient descent specific logic."""
def set_optimization_parameters(self, table_descriptor):
(table_descriptor.optimization_parameters.stochastic_gradient_descent
.SetInParent())
def create_variables_and_ops(self, table, variable_name, num_hosts,
table_config, table_variables,
load_parameters_ops, retrieve_parameters_ops):
del table_config
for host_id, table_variable in (zip(
range(num_hosts), table_variables)):
with ops.colocate_with(table_variable):
load_parameters_op = (
tpu_ops
.load_tpu_embedding_stochastic_gradient_descent_parameters(
parameters=table_variable,
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieved_table = (
tpu_ops
.retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
table_name=table,
num_shards=num_hosts,
shard_id=host_id))
retrieve_parameters_op = control_flow_ops.group(
state_ops.assign(table_variable, retrieved_table))
load_parameters_ops.append(load_parameters_op)
retrieve_parameters_ops.append(retrieve_parameters_op)
def get_slot_names(self):
return []
def get_slot(self, table, name):
raise ValueError('Stochastic gradient descent does not have slot variable.')
def _get_optimization_handler(optimization_parameters):
if isinstance(optimization_parameters, AdagradParameters):
return _AdagradHandler(optimization_parameters)
elif isinstance(optimization_parameters, AdamParameters):
return _AdamHandler(optimization_parameters)
elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
return _StochasticGradientDescentHandler(optimization_parameters)
else:
raise NotImplementedError()
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
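# For example, {'video': cfg_v, 'user': cfg_u} becomes
# OrderedDict([('user', cfg_u), ('video', cfg_v)]): keys are sorted so that
# iteration order is deterministic across runs.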
def _create_combiners(table_to_config_dict):
return [table_to_config_dict[t].combiner for t in table_to_config_dict]
def _create_table_to_features_dict(feature_to_table_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
for feature, table in six.iteritems(feature_to_table_dict):
if table in table_to_features_dict_tmp:
table_to_features_dict_tmp[table].append(feature)
else:
table_to_features_dict_tmp[table] = [feature]
table_to_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
return table_to_features_dict
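# For example, {'watched': 'video', 'friends': 'user'} yields
# OrderedDict([('user', ['friends']), ('video', ['watched'])]); both the tables
# and each per-table feature list are sorted for determinism.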
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
if part_match:
idx = int(part_match.group(1))
else:
raise RuntimeError('Internal Error: '
'Expected %s to contain /part_*.' % op.name)
device = hosts[idx]
return device
return device_fn
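# For example, an op named 'user/part_3/read' is placed on hosts[3]; ops whose
# names lack a '/part_<n>' component raise a RuntimeError by design.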
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
# TODO(shizhiw): automatically place embedding lookup elsewhere?
if vocabulary_size < num_hosts:
raise ValueError('`vocabulary_size`({}) is smaller than `num_hosts`({}). '
'As TPU embedding is not optimized for small tables, '
'please consider other ways for this embedding lookup.'.format(
vocabulary_size, num_hosts))
return list(variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_hosts),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
|
the-stack_106_29323 | import turtle
import ball
num_balls = int(input("Number of balls to simulate: "))
turtle.speed(0)
turtle.tracer(0)
turtle.hideturtle()
canvas_width = turtle.screensize()[0]
canvas_height = turtle.screensize()[1]
ball_radius = 0.05 * canvas_width
turtle.colormode(255)
color_list = []
xpos = []
ypos = []
vx = []
vy = []
ball_color = []
ball.initilizing(xpos, ypos, vx, vy, ball_color, canvas_width, canvas_height, ball_radius, num_balls)
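# Main animation loop: clear the frame, redraw every ball, advance it via
# ball.move_circle (which is assumed to handle bouncing off the canvas edges,
# given the width/height/radius arguments), then push the frame to the screen.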
while True:
turtle.clear()
for i in range(num_balls):
ball.draw_circle(ball_color[i], ball_radius, xpos[i], ypos[i])
ball.move_circle(i, xpos, ypos, vx, vy, canvas_width, canvas_height, ball_radius)
turtle.update()
# hold the window; close it by clicking the window close 'x' mark
turtle.done()
|
the-stack_106_29330 | import numpy as np
from perfect_information_game.tablebases import SymmetryTransform
from perfect_information_game.tablebases import AbstractTablebaseManager, get_verified_chess_subclass
class ChessTablebaseManager(AbstractTablebaseManager):
"""
Each tablebase has a descriptor, in a form such as KQkn (king and queen vs king and knight).
Only the symmetrically unique variants of each position are stored in the tablebases.
"""
def __init__(self, GameClass=None):
super().__init__(get_verified_chess_subclass(GameClass))
def query_position(self, state, outcome_only=False):
"""
Checks if the given position is in one of the existing tablebases.
Returns a tuple containing the state after the optimal move has been made, the game's outcome,
and the terminal distance.
If the position is not available in the tablebases, then (None, np.nan, np.nan) will be returned.
If the position is a draw by insufficient material, then (None, 0, 0) will be returned.
:param state:
:param outcome_only: If True, then the state after the move has been made
will not be included in the returned tuple.
"""
if self.GameClass.is_over(state):
outcome = self.GameClass.get_winner(state)
return (outcome, 0) if outcome_only else (None, outcome, 0)
if np.any(state[:, :, -2] == 1):
# any positions with en passant or castling are excluded from the tablebase
return (np.nan, np.nan) if outcome_only else (None, np.nan, np.nan)
symmetry_transform = SymmetryTransform(self.GameClass, state)
transformed_state = symmetry_transform.transform_state(state)
descriptor = self.GameClass.get_position_descriptor(transformed_state, pawn_ranks=True)
if descriptor in self.GameClass.DRAWING_DESCRIPTORS:
return (0, 0) if outcome_only else (None, 0, 0)
if descriptor not in self.available_tablebases:
return (np.nan, np.nan) if outcome_only else (None, np.nan, np.nan)
self.ensure_loaded(descriptor)
tablebase = self.tablebases[descriptor]
move_bytes = tablebase[self.GameClass.encode_board_bytes(transformed_state)]
(start_i, start_j, end_i, end_j), outcome, terminal_distance = self.GameClass.parse_move_bytes(move_bytes)
outcome = symmetry_transform.transform_outcome(outcome)
if outcome_only:
return outcome, terminal_distance
if terminal_distance == 0:
return None, outcome, 0
transformed_move_state = self.GameClass.apply_from_to_move(transformed_state, start_i, start_j, end_i, end_j)
move_state = symmetry_transform.untransform_state(transformed_move_state)
return move_state, outcome, terminal_distance
def get_random_endgame(self, descriptor, condition=None):
state = super().get_random_endgame(descriptor, condition)
return SymmetryTransform.random(self.GameClass, descriptor).transform_state(state)
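# Hypothetical usage sketch (added for illustration; `Chess` stands for whichever
# chess GameClass this project provides and is an assumption here):
#
#     manager = ChessTablebaseManager(GameClass=Chess)
#     move_state, outcome, terminal_distance = manager.query_position(state)
#     if np.isnan(outcome):
#         pass  # position not covered by the available tablebases
#     elif move_state is not None:
#         state = move_state  # play the tablebase-optimal move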
|
the-stack_106_29331 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = next(reversed(self))
else:
key = next(iter(self))
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
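if __name__ == '__main__':
    # Minimal usage sketch (not part of the original recipe): keys keep their
    # insertion order, and popitem(last=False) pops from the front of that order.
    d = OrderedDict()
    d['banana'] = 3
    d['apple'] = 4
    d['pear'] = 1
    print(d.keys())               # ['banana', 'apple', 'pear']
    print(d.popitem(last=False))  # ('banana', 3)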
|
the-stack_106_29332 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
from io import StringIO
DESCRIPTION_LINE_LENGTH_LIMIT = 120
TAB_SECTION_START = '<!-- xxx tabs xxx -->'
TAB_SECTION_END = '<!-- xxz tabs xxx -->'
TAB_START = '<!-- xxx tab "{}" xxx -->'
TAB_END = '<!-- xxz tab xxx -->'
INLINE_REF = re.compile(r"(\[[\s\S]+?\])\s*(\(.*?\))")
class ReadmeWriter(object):
def __init__(self):
self.writer = StringIO()
self.errors = []
def write(self, *strings):
for s in strings:
self.writer.write(s)
def new_error(self, s):
self.errors.append(s)
@property
def contents(self):
return self.writer.getvalue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.writer.close()
def update_links(link, links):
newref = max(links.values() or [0]) + 1
links[link] = newref
def process_links(section, links):
"""Extract inline links and replace with references."""
text = section['description']
matches = INLINE_REF.findall(text)
for m in matches:
lnk = m[1]
if lnk not in links:
update_links(lnk, links)
# replace (link) with [ref]
newtext = INLINE_REF.sub(lambda x: '{}[{}]'.format(x.group(1), links[x.group(2)]), text)
section['description'] = newtext
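# Illustrative example (added; the URL is hypothetical): if a section's
# description contains "see the [docs](https://example.com)", process_links()
# rewrites it to "see the [docs][1]" and records {'(https://example.com)': 1}
# in `links`; get_references() later renders that as "[1]: https://example.com".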
def write_section(section, writer):
header = '{} {}'.format('#' * section['header_level'], section['name'])
description = section['description']
writer.write(header)
writer.write('\n\n')
writer.write(description)
writer.write('\n')
def get_references(links):
refs = []
for link, ref in links.items():
line = f"[{ref}]: {link.lstrip('(').rstrip(')')}"
refs.append(line)
return '\n'.join(refs)
class ReadmeConsumer(object):
def __init__(self, spec):
self.spec = spec
def render(self):
files = {}
for file in self.spec['files']:
with ReadmeWriter() as writer:
links = dict()
writer.write('# Agent Check: {}'.format(self.spec['name']))
writer.write('\n\n')
sections = file['sections']
tab = None
for section in sections:
if section['hidden']:
continue
if section['tab']:
if tab is None:
tab = section['tab']
writer.write(TAB_SECTION_START + '\n')
else:
writer.write(TAB_END + '\n')
writer.write(TAB_START.format(tab) + '\n')
writer.write('\n')
elif tab is not None:
writer.write(TAB_END + '\n')
writer.write(TAB_SECTION_END + '\n')
writer.write('\n')
tab = None
process_links(section, links)
write_section(section, writer)
writer.write('\n')
# add link references to the end of document
refs = get_references(links)
writer.write(refs)
files[file['render_name']] = (writer.contents, writer.errors)
return files
|
the-stack_106_29333 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# @Time : 2020-07-16 09:01
# @Author : WangCong
# @Email : [email protected]
import numpy as np
import cv2
camera_parameter = {
# R
"R": [[-0.91536173, 0.40180837, 0.02574754],
[0.05154812, 0.18037357, -0.98224649],
[-0.39931903, -0.89778361, -0.18581953]],
# T
"T": [1841.10702775, 4955.28462345, 1563.4453959],
# f/dx, f/dy
"f": [1145.04940459, 1143.78109572],
# center point
"c": [512.54150496, 515.45148698]
}
def pixel_to_world(camera_intrinsics, r, t, img_points):
K_inv = camera_intrinsics.I
R_inv = np.asmatrix(r).I
R_inv_T = np.dot(R_inv, np.asmatrix(t))
world_points = []
coords = np.zeros((3, 1), dtype=np.float64)
for img_point in img_points:
coords[0] = img_point[0]
coords[1] = img_point[1]
coords[2] = 1.0
cam_point = np.dot(K_inv, coords)
cam_R_inv = np.dot(R_inv, cam_point)
scale = R_inv_T[2][0] / cam_R_inv[2][0]
scale_world = np.multiply(scale, cam_R_inv)
world_point = np.asmatrix(scale_world) - np.asmatrix(R_inv_T)
pt = np.zeros((3, 1), dtype=np.float64)
pt[0] = world_point[0]
pt[1] = world_point[1]
pt[2] = 0
world_points.append(pt.T.tolist())
return world_points
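# Derivation note (added for clarity): with the pinhole model
#   s * p = K @ (R @ Pw + T)   =>   Pw = R^-1 @ (s * K^-1 @ p - T),
# the scale s is chosen so that the world Z coordinate is zero (points are
# assumed to lie on the ground plane):
#   s = (R^-1 @ T)[2] / (R^-1 @ K^-1 @ p)[2],
# which is exactly `scale = R_inv_T[2][0] / cam_R_inv[2][0]` above.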
if __name__ == '__main__':
f = camera_parameter["f"]
c = camera_parameter["c"]
camera_intrinsic = np.mat(np.zeros((3, 3), dtype=np.float64))
camera_intrinsic[0, 0] = f[0]
camera_intrinsic[1, 1] = f[1]
camera_intrinsic[0, 2] = c[0]
camera_intrinsic[1, 2] = c[1]
camera_intrinsic[2, 2] = np.float64(1)
r = camera_parameter["R"]
t = np.asmatrix(camera_parameter["T"]).T
# img_points = [[100, 200],
# [150, 300]]
img_points = np.array(([100, 200],
[150, 300]), dtype=np.double)
result = pixel_to_world(camera_intrinsic, r, t, img_points)
print(result)
print('----')
axis = np.float32([[7700, 73407, 0], [-66029, -605036, 0]])
r2 = np.asmatrix(camera_parameter["R"])
result2, _ = cv2.projectPoints(axis, r2, t, camera_intrinsic, 0)
print(result2)
|
the-stack_106_29335 | import numpy as np
from datetime import datetime
import time
from flask import Flask
from flask import Markup
from flask import Flask, request
from flask import render_template
from . import database
from .core import publishers
from . import apis
from . import exceptions
app = Flask(__name__)
def get_weekly_topic_stats(language, topic):
rows = None
with database.connection() as conn:
cur = conn.cursor()
begin = datetime(2018, 10, 1)
cur.execute(f"""SELECT date_trunc('week', created_at::date) AS week,
COUNT(*), AVG(negative) AS avg_negative, AVG(neutral) AS avg_neutral,
AVG(positive) AS avg_positive
FROM tweets
WHERE '{begin}'::date < created_at
AND '{language}' = language
AND '{topic}' = ANY(topics)
GROUP BY week
ORDER BY week;""")
rows = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
cur.close()
rows = [dict(zip(colnames, row)) for row in rows]
for row in rows:
row["week"] = row["week"].strftime("%Y-%m-%d")
return rows
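# Each returned row is a dict keyed by the SQL column names; the values below
# are illustrative only:
#   {'week': '2018-10-01', 'count': 42, 'avg_negative': 0.2,
#    'avg_neutral': 0.5, 'avg_positive': 0.3}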
def get_thirty_days_topics(language):
res = None
with database.connection() as conn:
cur = conn.cursor()
cur.execute(f"SELECT begin, topics FROM thirty_days_topics WHERE language = '{language}'")
res = cur.fetchall()
cur.close()
# return [{"begin": begin, "topics": topics} for begin, topics in res]
return [{"topics": topics} for _, topics in res]
@app.route("/")
def chart():
languages = database.get_languages()
topics = database.get_topics()
topics_stats = {}
thirty_days_stats = {}
for language in languages:
topics_stats[language] = {}
for topic in topics:
topics_stats[language][topic] = get_weekly_topic_stats(language, topic)
thirty_days_stats[language] = get_thirty_days_topics(language)
return render_template('index.html', thirty_days_stats=thirty_days_stats)
# return render_template('chart.html', languages=languages, topics_stats=topics_stats,
# thirty_days_stats=thirty_days_stats, topics=topics)
def get_weekly_count(token):
rows = None
begin = datetime(2018, 10, 1)
with database.connection() as conn:
cur = conn.cursor()
cur.execute(f"""SELECT date_trunc('week', created_at::date) AS week,
COUNT(*)
FROM tweets
WHERE '{token}' = ANY(tokens) AND '{begin}'::date < created_at
GROUP BY week
ORDER BY week;""")
rows = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
cur.close()
rows = [dict(zip(colnames, row)) for row in rows]
for row in rows:
row["week"] = row["week"].strftime("%Y-%m-%d")
return rows
@app.route('/search_engine')
def search_engine():
tokens = []
if "text" in request.args:
text = request.args.get('text')
tokens = [token.strip() for token in text.lower().split(',')]
weekly_count = {}
for token in tokens:
weekly_count[token] = get_weekly_count(token)
return render_template('search_engine.html', tokens_weekly_count=weekly_count, tokens=tokens)
def get_token_evolution(token):
totals_view = """
CREATE VIEW totals AS
SELECT publisher, date_trunc('month', created_at) AS month, COUNT(*)
FROM tweets
GROUP BY publisher, month
"""
tokens_view = """
CREATE MATERIALIZED VIEW tokenized_tweets AS
SELECT publishers.name AS publisher, date_trunc('month', created_at) AS month,
to_tsvector('simple', unaccent(text)) AS tokens
FROM tweets
JOIN publishers ON tweets.publisher = publishers.screen_name
WITH DATA
"""
# tokens_view = """
# CREATE MATERIALIZED VIEW tokenized_tweets AS
# SELECT publishers.name AS publisher, date_trunc('month', created_at) AS month,
# to_tsvector(publishers.language::regconfig, unaccent(text)) AS tokens
# FROM tweets
# JOIN publishers ON tweets.publisher = publishers.screen_name
# WITH DATA
# """
query = f"""
SELECT publisher, COUNT(*) AS matches, month
FROM tokenized_tweets
WHERE tokens @@ to_tsquery('{token}')
GROUP by publisher, month
ORDER BY publisher, month
"""
# query = f"""
# SELECT totals.month, totals.publisher,
# CASE WHEN matching.count is NULL THEN 0 ELSE matching.count END AS matches, totals.count AS total
# FROM (SELECT publisher, COUNT(*), month
# FROM tokenized_tweets
# WHERE tokens @@ to_tsquery('{token}')
# GROUP by publisher, month) as matching
# RIGHT OUTER JOIN totals ON matching.publisher = totals.publisher and matching.month = totals.month
# ORDER BY totals.publisher, totals.month
# """
# query = f"""
# SELECT total.month, total.publisher,
# CASE WHEN matching.count is NULL THEN 0 ELSE matching.count END AS matches,
# total.count AS total
# FROM (SELECT publisher, count(*), date_trunc('month', created_at) AS month
# FROM tweets JOIN publishers ON tweets.publisher = publishers.screen_name
# WHERE to_tsvector(publishers.language::regconfig, unaccent(text)) @@ to_tsquery('{token}')
# GROUP BY publisher, month) AS matching
# RIGHT OUTER JOIN (SELECT publisher, date_trunc('month', created_at) AS month, COUNT(*)
# FROM tweets
# GROUP BY publisher, month) AS total
# ON matching.publisher = total.publisher AND matching.month = total.month
# ORDER BY total.publisher, total.month
# """
with database.connection() as conn:
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
cur.close()
data = [dict(zip(colnames, row)) for row in rows]
for i, record in enumerate(data):
data[i]["month"] = record["month"].strftime('%Y-%m')
return data
@app.route("/token_evolution", methods=["GET"])
def token_evolution():
data = None
query = None
if "token" in request.args:
query = request.args["token"]
data = get_token_evolution(request.args["token"])
# data = [{"month": datetime(2018, 11, 1).strftime('%Y-%m'), "publisher": "a", "matches": 23, "total": 20},{"month": datetime(2018, 12, 1).strftime('%Y-%m'), "publisher": "a", "matches": 22, "total": 20},
# {"month": datetime(2018, 10, 1).strftime('%Y-%m'), "publisher": "b", "matches": 4, "total": 20}, {"month": datetime(2018, 11, 1).strftime('%Y-%m'), "publisher": "b", "matches": 1, "total": 20}]
return render_template('token_evolution.html', data=data, query=query)
def run_query(query):
res = None
with database.connection() as conn:
cur = conn.cursor()
cur.execute(query)
res = cur.fetchall()
    return res
@app.route('/db_stats')
def db_stats():
tokens = []
query_response = ""
if "text" in request.args and request.args.get('text') != "":
query = request.args.get('text')
query_response = run_query(query)
if query_response is not None:
query_response = [', '.join([str(item) for item in row]) for row in query_response]
query_response = '<br>'.join(query_response)
return render_template('db_stats.html', query_text=query_response)
@app.route('/publishers')
def publishers_route():
print(request.args)
message = None
languages = database.get_languages()
if ("screen_name" in request.args and request.args.get("screen_name") != "") \
and ("language" in request.args):
language = request.args.get("language").lower()
if language not in languages:
message = f"'{language}' not in {database.get_languages()}"
else:
twitter_name = request.args.get("screen_name")
api = apis.get_twitter()
try:
publisher = publishers.get_info(api, twitter_name)
except exceptions.InvalidTwitterUserException as exc:
message = f"Twitter user '{twitter_name}' not found"
else:
try:
publishers.insert_db([publisher])
message = f"Added {publisher['name']} - {publisher['screen_name']} into DB"
except exceptions.DuplicateDBEntryException:
message = f"{publisher['name']} is already present in DB"
publishers_stats = publishers.get_publishers()
for publisher in publishers_stats:
for key in publisher:
if publisher[key] is None or key == "insert_timestamp":
publisher[key] = ""
return render_template('publishers.html', publishers=publishers_stats, message=message,
language_pattern='|'.join(languages))
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080)
# app.run(host="192.168.1.237", port=8080)
# app.run(host="192.168.1.11", port=8080)
|
the-stack_106_29336 | # a123_apple_1.py
import turtle as trtl
#-----setup-----
t=0
tstep = 0.25
delx = 2
g = -1
yvel = 20
y=0
apple_image = "apple.gif" # Store the file name of your shape
pear_image = "pear.gif" # Store the file name of your shape
wn = trtl.Screen()
wn.setup(width=0.3, height=0.35)
wn.bgpic("background.gif")
wn.addshape(apple_image) # Make the screen aware of the new file
wn.addshape(pear_image) # Make the screen aware of the new file
# apple = trtl.Turtle()
# apple.hideturtle()
pear = trtl.Turtle()
WorldBorder = (3,4)
drawer = trtl.Turtle()
posVal = True
#-----functions-----
# given a turtle, set that turtle to be shaped by the image file
def border_control(border):
for object_range in range(0,100,1):
border.goto(100,-200)
def draw_apple(active_apple):
global posVal
active_apple.shape(apple_image)
active_apple.penup()
active_apple.goto(0,-200)
if posVal == True:
posVal = False
active_apple.goto(-280,-200)
else:
posVal = True
active_apple.goto(280,-200)
wn.update()
def draw_pear(activate_pear):
activate_pear.shape(pear_image)
wn.update()
def object_fall(object):
global t, yvel, y, tstep, delx, g
for vel_sample in range(0,5,1):
object.goto(vel_sample,-200)
num = 0
    while True:
t=t+tstep
x=t*delx
yvel = yvel + g * tstep
y=y + yvel * tstep
object.goto(x,y)
num = num + 1
if object.ycor() < -200:
print("OBJECT BORDER REACHED: ",object.ycor())
if num > 200:
object.clear()
t=0
tstep = 0.25
delx = 2
g = -1
yvel = 20
y=0
break
print(num)
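# Motion model used in object_fall (added note): the horizontal position is
# recomputed from elapsed time (x = t * delx), while the vertical motion is
# integrated step by step (semi-implicit Euler): yvel += g * tstep, then
# y += yvel * tstep.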
def get_mouse_click_coor(x, y):
print(x, y)
def draw_an_A():
drawer.color("blue")
drawer.write("A", font=("Arial", 74, "bold"))
drawer.penup()
drawer.forward(30)
draw_pear(pear)
object_fall(pear)
#-----function calls-----
trtl.onscreenclick(get_mouse_click_coor)
wn.onkeypress(draw_an_A, "a")
wn.listen()
wn.mainloop()
|
the-stack_106_29337 | import requests
import pandas as pd
import datetime as dt
import numpy as np
#### dict_keys(['table', 'currency', 'code', 'rates'])
def getCurrencyLow(days, currency, currencyName): # Getting currency below 350 days (used for getting data from larger time periods)
today = dt.datetime.today()
for i in range(days + 1):
last = today - dt.timedelta(days = i)
startDate = today.strftime('%Y-%m-%d')
endDate = last.strftime('%Y-%m-%d')
connect = 'http://api.nbp.pl/api/exchangerates/rates/a/' + currency + '/' + endDate + '/' + startDate
connectResult = requests.get(connect)
response = connectResult.json()
data = pd.DataFrame(response['rates'], columns=['effectiveDate', 'mid'], index=None)
NewNames = {'effectiveDate': 'Data', 'mid': currencyName}
data.rename(columns=NewNames, inplace=True)
return data
def getCurrency(numberofDays, currency, currencyName): # Downloading currency data from larger time periods
if numberofDays < 350:
result = getCurrencyLow(numberofDays, currency, currencyName)
return result
else:
results = []
condition = round(numberofDays / 350)
currentDays = 350
start = dt.datetime.today()
#print("condition:", condition)
for section in range(condition + 1):
#print("section:", section)
for i in range(currentDays + 1):
last = start - dt.timedelta(days=i)
startDate = start.strftime('%Y-%m-%d')
endDate = last.strftime('%Y-%m-%d')
connect = 'http://api.nbp.pl/api/exchangerates/rates/a/' + currency + '/' + endDate + '/' + startDate
#print(connect)
connectResult = requests.get(connect)
response = connectResult.json()
data = pd.DataFrame(response['rates'], columns=['effectiveDate', 'mid'], index=None)
NewNames = {'effectiveDate': 'Data', 'mid': currencyName}
data.rename(columns=NewNames, inplace=True)
data.sort_values(by='Data', inplace=True, ascending=False)
data = data.reset_index(drop=True)
results.append(data)
start = last
numberofDays -= 350
if numberofDays < 350:
currentDays = numberofDays
return results
#################################################################################
############################### Final function ##################################
#
# Modifying downloaded data, adding the previous day
# to holidays in order to complete all records.
#
#################################################################################
def finalDownload(finalDays, finalCurrencyCode, finalCurrencyName):
tempCurrency = getCurrency(finalDays, finalCurrencyCode, finalCurrencyName)
downloadedCurrency = tempCurrency[0]
for i in range(len(tempCurrency) - 1):
downloadedCurrency = downloadedCurrency.append(tempCurrency[i + 1])
downloadedCurrency.drop_duplicates(subset='Data', inplace=True)
downloadedCurrency = downloadedCurrency.reset_index(drop=True)
downloadedCurrency = downloadedCurrency.astype({'Data': 'datetime64'})
daysResult = []
Results = []
for i in range(downloadedCurrency.shape[0] - 1):
result = (downloadedCurrency['Data'][i] - downloadedCurrency['Data'][i + 1]).days
if result != 1:
Results.append(result)
daysResult.append([downloadedCurrency['Data'][i], downloadedCurrency['Data'][i + 1],
downloadedCurrency[finalCurrencyName][i + 1]])
for i in range(len(daysResult)):
checkFinal = downloadedCurrency['Data'].isin([daysResult[i][1]])
checkFinalindex = checkFinal[checkFinal == True].index
slicePart = checkFinalindex[0]
for j in range(Results[i] - 1):
line = pd.DataFrame({'Data': 'Dzień wolny', finalCurrencyName: daysResult[i][2]}, index=[slicePart])
downloadedCurrency = downloadedCurrency.append(line, ignore_index=False)
downloadedCurrency = downloadedCurrency.sort_index().reset_index(drop=True)
del downloadedCurrency['Data']
newDates = []
toDay = dt.datetime.today()
for k in range(downloadedCurrency.shape[0]):
lastDay = toDay - dt.timedelta(days=k)
newDates.append(lastDay.strftime('%Y-%m-%d'))
downloadedCurrency.insert(0, 'Data', newDates)
downloadedCurrency.sort_values(by='Data', inplace=True, ascending=False)
downloadedCurrency = downloadedCurrency.reset_index(drop=True)
return downloadedCurrency
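# Hypothetical usage (added; requires network access to api.nbp.pl and a valid
# NBP table-A currency code such as 'eur' or 'usd'):
#
#     rates = finalDownload(30, 'eur', 'EUR/PLN')
#     print(rates.head())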
|
the-stack_106_29340 | import os
import cloudpickle
from quake.client.base.client import Client
from quake.client.base.plan import Plan
_global_plan = Plan()
_global_client = None
_pickle_cache = {}
_inouts_objs = {}
def get_inout_obj(obj):
pair = _inouts_objs.get(id(obj))
if pair:
return pair[0]
else:
return None
def set_inout_obj(obj, value):
_inouts_objs[id(obj)] = (value, obj)
def pop_inout_obj(obj):
obj_id = id(obj)
    if obj_id not in _inouts_objs:
return None
pair = _inouts_objs.pop(obj_id)
return pair[0]
def pickle_function(fn):
data = _pickle_cache.get(fn)
if data is None:
data = cloudpickle.dumps(fn)
_pickle_cache[fn] = data
return data
def get_global_plan():
return _global_plan
def flush_global_plan(client):
tasks = _global_plan.take_tasks()
if tasks:
client.submit(tasks)
# for task in tasks:
# print("QE: task_id = {} <-> call_id = {}, outputs={}"
# .format(task.task_id, task.call_id, task.n_outputs))
def ensure_global_client():
global _global_client
if _global_client is None:
server = os.environ.get("QUAKE_SERVER")
if server is None:
raise Exception(
"No global server is defined."
"Set variable QUAKE_SERVER or call quake.client.set_global_client()"
)
if ":" in server:
hostname, port = server.rsplit(":", 1)
try:
port = int(port)
except ValueError:
raise Exception("Invalid format of QUAKE_SERVER variable")
_global_client = Client(hostname, port)
else:
_global_client = Client(server)
return _global_client
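# Examples of the QUAKE_SERVER parsing above (host names and port are
# illustrative): "scheduler-node:8600" becomes Client("scheduler-node", 8600),
# while a bare "scheduler-node" becomes Client("scheduler-node") and relies on
# the Client's default port.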
|