Dataset columns:

| column | type | min .. max / values |
|---|---|---|
| hexsha | stringlengths | 40 .. 40 |
| size | int64 | 1 .. 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 .. 239 |
| max_stars_repo_name | stringlengths | 5 .. 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_stars_repo_licenses | sequencelengths | 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 .. 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 .. 24 (nullable) |
| max_issues_repo_path | stringlengths | 3 .. 239 |
| max_issues_repo_name | stringlengths | 5 .. 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_issues_repo_licenses | sequencelengths | 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 .. 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 .. 24 (nullable) |
| max_forks_repo_path | stringlengths | 3 .. 239 |
| max_forks_repo_name | stringlengths | 5 .. 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_forks_repo_licenses | sequencelengths | 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 .. 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 .. 24 (nullable) |
| content | stringlengths | 1 .. 1.03M |
| avg_line_length | float64 | 1 .. 958k |
| max_line_length | int64 | 1 .. 1.03M |
| alphanum_fraction | float64 | 0 .. 1 |
794502a18bee31b215c0a2a7da77542aba31669b | 880 | py | Python | linda/linda_app/admin.py | cbotsikas/LindaWorkbench | b2bfa091fb4ec80ac35b3f68edf46780c1e9ffea | ["MIT"] | stars: null | issues: null | forks: null

from django.contrib import admin
from django.contrib.admin import AdminSite
from django.utils.translation import ugettext_lazy
from models import *
class VocabularyAdmin(admin.ModelAdmin):
pass
admin.site.register(Vocabulary, VocabularyAdmin)
class VocabularyRankingAdmin(admin.ModelAdmin):
pass
admin.site.register(VocabularyRanking, VocabularyRankingAdmin)
class VocabularyCommentsAdmin(admin.ModelAdmin):
pass
admin.site.register(VocabularyComments, VocabularyCommentsAdmin)
class DatasourceDescriptionAdmin(admin.ModelAdmin):
pass
admin.site.register(DatasourceDescription, DatasourceDescriptionAdmin)
class VocabularyClassAdmin(admin.ModelAdmin):
pass
admin.site.register(VocabularyClass, VocabularyClassAdmin)
class VocabularyPropertyAdmin(admin.ModelAdmin):
pass
admin.site.register(VocabularyProperty, VocabularyPropertyAdmin)

avg_line_length: 20 | max_line_length: 70 | alphanum_fraction: 0.828409
794502cb76a63c4751694070a8feb659124ce651 | 6,627 | py | Python | build/lib/airlab/registration/registration.py | ltorres6/airlab | 83a2debebc4c880b51c545c2e95bc9c52e73f4ae | ["Apache-2.0"] | stars: null | issues: null | forks: null

# Copyright 2018 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch as th
from numpy import inf, max
class _Registration:
def __init__(self, verbose=True):
# transformation of the image
self._transformation = None
# image similarity measure
self._image_loss = None
# optimizer
self._optimizer = None
self._number_of_iterations = 100
self._displacement = None
self._verbose = verbose
self.loss = inf
def set_optimizer(self, optimizer):
self._optimizer = optimizer
def set_number_of_iterations(self, number_of_iterations):
self._number_of_iterations = number_of_iterations
def set_transformation(self, transformation):
self._transformation = transformation
def set_image_loss(self, loss):
self._image_loss = loss
class _PairwiseRegistration(_Registration):
def __init__(self, verbose=True):
super(_PairwiseRegistration, self).__init__(verbose=verbose)
# regulariser on the displacement
self._regulariser_displacement = []
# regulariser on the parameters
self._regulariser_parameter = []
def set_regulariser_displacement(self, regulariser_displacement):
self._regulariser_displacement = regulariser_displacement
def set_regulariser_parameter(self, regulariser_parameter):
self._regulariser_parameter = regulariser_parameter
class _GroupwiseRegistration(_Registration):
def __init__(self, verbose=True):
super(_GroupwiseRegistration, self).__init__(verbose=verbose)
self._images = None
def SetImages(self, images):
self._images = images
class _ImageSeriesRegistration(_Registration):
def __init__(self, verbose=True):
        super(_ImageSeriesRegistration, self).__init__(verbose=verbose)
self._image_series = None
self._fixed_image = None
def SetImageSeries(self, images):
self._images = images
def SetFixedImage(self, image):
self._fixed_image = image
class PairwiseRegistration(_PairwiseRegistration):
def __init__(self, verbose=True):
super(PairwiseRegistration, self).__init__(verbose=verbose)
def _closure(self):
self._optimizer.zero_grad()
displacement = self._transformation()
# compute the image loss
lossList = []
loss_names = []
for image_loss in self._image_loss:
lossList.append(image_loss(displacement))
loss_names.append(image_loss.name)
# compute the regularisation loss on the displacement
for reg_disp in self._regulariser_displacement:
lossList.append(reg_disp(displacement))
loss_names.append(reg_disp.name)
# compute the regularisation loss on the parameter
for reg_param in self._regulariser_parameter:
lossList.append(reg_param(self._transformation.named_parameters()))
loss_names.append(reg_param.name)
if self._verbose:
for loss_value, loss_name in zip(lossList, loss_names):
print(str(loss_name) + ": " + str(loss_value.data.item()) + " ", end="", flush=True)
print("")
# sum up all loss terms
loss = sum(lossList)
loss.backward()
return loss
def start(self, EarlyStopping=False, StopPatience=10):
self.loss_history = []
if EarlyStopping:
n = 0
try:
self.loss
except:
self.loss = inf
for iter_index in range(self._number_of_iterations):
if self._verbose:
print(str(iter_index) + " ", end="", flush=True)
loss = self._optimizer.step(self._closure)
self.loss_history.append(loss)
if EarlyStopping:
if loss < self.loss:
n = 0
self.loss = loss
best = self._transformation.state_dict()
else:
n += 1
if n > StopPatience:
self._transformation.load_state_dict(best)
return
self.loss = loss
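# A minimal usage sketch (hypothetical objects; the transformation, image loss and optimizer are
# assumed to be built elsewhere with the surrounding airlab API, this is not taken from this file):
#   registration = PairwiseRegistration(verbose=True)
#   registration.set_transformation(transformation)
#   registration.set_image_loss([image_loss])
#   registration.set_optimizer(optimizer)
#   registration.set_number_of_iterations(200)
#   registration.start(EarlyStopping=True, StopPatience=10)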
class DemonsRegistration(_Registration):
def __init__(self, verbose=True):
super(DemonsRegistration, self).__init__(verbose=verbose)
# regulariser on the displacement
self._regulariser = []
def set_regulariser(self, regulariser):
self._regulariser = regulariser
def _closure(self):
self._optimizer.zero_grad()
displacement = self._transformation()
# compute the image loss
lossList = []
loss_names = []
for image_loss in self._image_loss:
lossList.append(image_loss(displacement))
loss_names.append(image_loss.name)
if self._verbose:
for loss_value, loss_name in zip(lossList, loss_names):
print(str(loss_name) + ": " + str(loss_value.data.item()) + " ", end="", flush=True)
print("")
# sum up all loss terms
loss = sum(lossList)
loss.backward()
return loss
def start(self, EarlyStopping=False, stop_window=10, stop_criterion=1e-6):
if EarlyStopping:
self.loss_history = []
count = 0
for iter_index in range(self._number_of_iterations):
if self._verbose:
print(str(iter_index) + " ", end="", flush=True)
loss = self._optimizer.step(self._closure)
for regulariser in self._regulariser:
regulariser.regularise(self._transformation.parameters())
if EarlyStopping:
count += 1
self.loss_history.append(loss)
                if count >= stop_window:
mean_change = th.mean(th.as_tensor(self.loss_history[-stop_window:]))
print(f"mean change: {mean_change}")
if mean_change < stop_criterion:
return
avg_line_length: 31.259434 | max_line_length: 100 | alphanum_fraction: 0.6303
79450327f865cdb09532663fa652c0650923d4f0 | 834 | py | Python | demos/p0-io.py | utep-cs-systems-courses/1-shell-HectorRichart | 2c9e039614acb7ed7704999ec5f9b2d8d4a9d34e | ["BSD-3-Clause"] | stars: null | issues: null | forks: null

#! /usr/bin/env python3
import os, sys, re
fdOut = os.open("p0-output.txt", os.O_CREAT | os.O_WRONLY)
fdIn = os.open("p0-io.py", os.O_RDONLY)
print(f"fdIn={fdIn}, fdOut={fdOut}");
# note that
# fd #0 is "standard input" (by default, attached to keyboard)
# fd #1 is "standard output" (by default, attached to display)
# fd #2 is "standard error" (by default, attached to display for error output)
lineNum = 1
while 1:
input = os.read(fdIn, 10000) # read up to 10k bytes
if len(input) == 0: break # done if nothing read
lines = re.split(b"\n", input)
for line in lines:
strToPrint = f"{lineNum:5d}: {line.decode()}\n"
os.write(fdOut, strToPrint.encode()) # write to output file
os.write(1 , strToPrint.encode()) # write to fd1 (standard output)
lineNum += 1
avg_line_length: 30.888889 | max_line_length: 79 | alphanum_fraction: 0.630695
79450456b70e2d9a93b90196616ec9de929ea057 | 3,493 | py | Python | tests/posting/test_log2k8s.py | pawelkopka/kopf | 51a3a70e09a17cf3baec2946b64b125a90595cf4 | ["MIT"] | stars: null | issues: null | forks: null

import logging
import pytest
from kopf.config import EventsConfig
from kopf.engines.logging import ObjectLogger, LocalObjectLogger
OBJ1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}}
REF1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}
@pytest.mark.parametrize('logfn, event_type', [
['info', "Normal"],
['warning', "Warning"],
['error', "Error"],
['critical', "Fatal"],
])
async def test_posting_normal_levels(caplog, logstream, logfn, event_type, event_queue, event_queue_loop):
logger = ObjectLogger(body=OBJ1)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
assert event_queue.qsize() == 1
event1 = event_queue.get_nowait()
assert event1.ref == REF1
assert event1.type == event_type
assert event1.reason == "Logging"
assert event1.message == "hello world"
assert caplog.messages == ["hello world"]
@pytest.mark.parametrize('logfn, event_type, min_levelno', [
['debug', "Debug", logging.DEBUG],
['info', "Normal", logging.INFO],
['warning', "Warning", logging.WARNING],
['error', "Error", logging.ERROR],
['critical', "Fatal", logging.CRITICAL],
])
async def test_posting_above_config(caplog, logstream, logfn, event_type, min_levelno,
event_queue, event_queue_loop, mocker):
logger = ObjectLogger(body=OBJ1)
logger_fn = getattr(logger, logfn)
mocker.patch.object(EventsConfig, 'events_loglevel', min_levelno)
logger_fn("hello %s", "world")
mocker.patch.object(EventsConfig, 'events_loglevel', min_levelno + 1)
logger_fn("must not be posted")
assert event_queue.qsize() == 1
event1 = event_queue.get_nowait()
assert event1.ref == REF1
assert event1.type == event_type
assert event1.reason == "Logging"
assert event1.message == "hello world"
assert caplog.messages == ["hello world", "must not be posted"]
@pytest.mark.parametrize('logfn', [
'debug',
])
async def test_skipping_hidden_levels(caplog, logstream, logfn, event_queue, event_queue_loop):
logger = ObjectLogger(body=OBJ1)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
logger.info("must be here")
assert event_queue.qsize() == 1 # not 2!
assert caplog.messages == ["hello world", "must be here"]
@pytest.mark.parametrize('logfn', [
'debug',
'info',
'warning',
'error',
'critical',
])
async def test_skipping_below_config(caplog, logstream, logfn, event_queue, event_queue_loop,
mocker):
logger = ObjectLogger(body=OBJ1)
logger_fn = getattr(logger, logfn)
mocker.patch.object(EventsConfig, 'events_loglevel', 666)
logger_fn("hello %s", "world")
mocker.patch.object(EventsConfig, 'events_loglevel', 0)
logger.info("must be here")
assert event_queue.qsize() == 1 # not 2!
assert caplog.messages == ["hello world", "must be here"]
@pytest.mark.parametrize('logfn', [
'debug',
'info',
'warning',
'error',
'critical',
])
async def test_skipping_when_local_with_all_levels(caplog, logstream, logfn, event_queue, event_queue_loop):
logger = LocalObjectLogger(body=OBJ1)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
assert event_queue.qsize() == 0
assert caplog.messages == ["hello world"]
avg_line_length: 31.468468 | max_line_length: 108 | alphanum_fraction: 0.659319
794506b6c929125cc55d8f4a1dc084ffae4eb6c2 | 5,120 | py | Python | dxm/lib/DxFileFormat/DxFileFormat.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | ["Apache-2.0"] | stars: 5 (2018-08-23T15:47:05.000Z to 2022-01-19T23:38:18.000Z) | issues: 59 (2018-10-15T10:37:00.000Z to 2022-03-22T20:49:25.000Z) | forks: 12 (2019-03-08T19:59:13.000Z to 2021-12-16T03:28:04.000Z)

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
import logging
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.masking_api.api.file_format_api import FileFormatApi
from dxm.lib.masking_api.rest import ApiException
from dxm.lib.masking_api.genericmodel import GenericModel
class DxFileFormat(object):
swagger_types = {
'file_format_id': 'int',
'file_format_name': 'str',
'file_format_type': 'str',
'header': 'int',
'footer': 'int'
}
swagger_map = {
'file_format_id': 'fileFormatId',
'file_format_name': 'fileFormatName',
'file_format_type': 'fileFormatType',
'header': 'header',
'footer': 'footer'
}
def __init__(self, engine):
"""
Constructor
        :param engine: DxMaskingEngine object
"""
#FileFormat.__init__(self)
self.__engine = engine
self.__logger = logging.getLogger()
self.__logger.debug("creating DxFileFormat object")
self.__api = FileFormatApi
self.__apiexc = ApiException
self.__obj = None
def from_filetype(self, filetype):
self.__obj = filetype
self.__obj.swagger_types = self.swagger_types
self.__obj.swagger_map = self.swagger_map
def create_fileformat(self, file_format_name, file_format_type):
"""
        Create a file format object
        :param file_format_name: name of the file format
        :param file_format_type: type of the file format
"""
self.__obj = GenericModel({ x:None for x in self.swagger_map.values()}, self.swagger_types, self.swagger_map)
self.obj.file_format_name = file_format_name
self.obj.file_format_type = file_format_type
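    # A minimal usage sketch (hypothetical engine object and format values, not taken from this file):
    #   ff = DxFileFormat(engine)                         # engine: a connected DxMaskingEngine
    #   ff.create_fileformat("my_format", "DELIMITED")    # name and type are assumptions
    #   ff.add()       # returns 0 on success, 1 on error
    #   ff.delete()    # returns None on success, 1 on error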
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
@property
def file_format_id(self):
if self.obj is not None:
return self.obj.file_format_id
else:
return None
@property
def file_format_name(self):
if self.obj is not None:
return self.obj.file_format_name
else:
return None
@property
def file_format_type(self):
if self.obj is not None:
return self.obj.file_format_type
else:
return None
def add(self):
"""
Add File type to Masking engine and print status message
        return 0 if no error
        return 1 in case of error
"""
if (self.obj.file_format_name is None):
print_error("File format name is required")
self.__logger.error("File format name is required")
return 1
if (self.obj.file_format_type is None):
print_error("File format type is required")
self.__logger.error("File format type is required")
return 1
try:
self.__logger.debug("create filetype input %s" % str(self))
api_instance = self.__api(self.__engine.api_client)
self.__logger.debug("API instance created")
response = api_instance.create_file_format(self.obj.file_format_name,
self.obj.file_format_type)
self.from_filetype(response)
self.__logger.debug("filetype response %s"
% str(response))
print_message("Filetype %s added" % self.obj.file_format_name)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def delete(self):
"""
        Delete file format from Masking engine and print status message
        return None if no error
return 1 in case of error
"""
api_instance = self.__api(self.__engine.api_client)
try:
self.__logger.debug("delete file format id %s"
% self.obj.file_format_id)
response = api_instance.delete_file_format(self.obj.file_format_id)
self.__logger.debug("delete file format response %s"
% str(response))
print_message("File format %s deleted" % self.obj.file_format_name)
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
| 31.604938 | 117 | 0.615234 |
794507366ee63c1df48639225d9f5d2da792d956 | 79,402 | py | Python | ProcessYcsbLog.py | TSDBBench/Overlord | d72b6927ceaf6631f5b07f411e34bec9904158c4 | ["Apache-2.0"] | stars: 6 (2017-07-05T16:59:16.000Z to 2020-07-01T10:17:09.000Z) | issues: 8 (2017-11-03T13:36:53.000Z to 2021-09-05T11:05:17.000Z) | forks: 6 (2016-11-10T12:56:41.000Z to 2018-06-19T21:53:58.000Z)

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import copy
import gzip
from dateutil import tz
import jinja2
import logging
import argparse
import magic
import os
import re
import datetime
import locale
import cPickle
import bokeh.charts
from itertools import islice
import pdfkit
import pytz
import Util
import webcolors
import threading
import signal
__author__ = 'Andreas Bader'
__version__ = "0.03"
# in 0.02 some dicts were replaced with lists.
# reason: when parsing large result files (100k buckets = 400k lines)
# <dict>.keys() is getting relatively slow, but the amount of keys stays the same
# which is kind of strange
# so replacing those dicts with lists helps a great deal
# also some other speed-affecting changes were made
# Threading, blockwise-reading etc.
# some variable names will be a bit weird (like <something>Dict for a list) because of this
# Also since 0.02 we won't save the whole log file into ydc anymore, as this seems to be a bit overkill
# This should not affect older 0.01 logs, but could happen. However, "LatencyList" was renamed, so this version can't read 0.01 anymore.
# Also correct timezone awareness was added
# 0.03 calculates 99% and 95% also in us, old files cannot be read. This reflects the update to YCSB 0.4.0
# For reading 0.2.0 files, replace "99thPercentileLatency(us)" with "99thPercentileLatency(ms)" and for 95th accordingly
# in Line 74, 75, 156, 157, 1207, 1208, 1252, 1253, 1254, 1255, 1296, 1297, 1298, 1299
plotColorDict={"DEFAULT" : "blue",
"INSERT0" : "red",
"INSERT1" : "darkred",
"READ0" : "orange",
"READ1" : "darkorange",
"CLEAN0" : "green",
"CLEAN1" : "darkgreen",
"UPDATE0" : "purple",
"UPDATE1" : "darkpurple"
}
defaultPlotColor=plotColorDict["DEFAULT"]
maxTableColumnsSingle=6
maxTableColumnsMulti=10
templateFile="template.html" #Jinja2 Template, see http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh.embed.file_html
templateFileMulti="template_multi.html" #Jinja2 Template, see http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh.embed.file_html
pdfOptions = {
'page-size': 'A4',
'margin-top': '0.5cm',
'margin-right': '0.5cm',
'margin-bottom': '0.5cm',
'margin-left': '0.5cm',
'encoding': "UTF-8",
'no-outline': None,
'quiet': '',
'dpi' : 600,
'image-dpi' : 600,
'image-quality' : 94,
'title' : ""
}
ignoreParams = ["Operations", "Return", "LatencyList"] # parameters that should be ignored (LatencyList e.g.)
possibleMissingParams = ["99thPercentileLatency(us)","95thPercentileLatency(us)"] #Params that can be missing, replaced by -1
convertFromUsToMs = ["AverageLatency(us)", "MinLatency(us)", "MaxLatency(us)", "99thPercentileLatency(us)","95thPercentileLatency(us)" ] # Some special parameters need to be converted
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGINT, signal_handler)
def find_time(timeString,logger):
timeRegex="[A-Za-z]+\s+[A-Za-z]+\s+[0-9]+\s+[0-9]+:[0-9]+:[0-9]+\s+[A-Za-z]+\s+[0-9]+"
if re.search(timeRegex,timeString) != None:
act_loc=locale.getlocale()
try:
locale.setlocale(locale.LC_ALL,'en_US.UTF-8')
except Exception, e:
logger.error('Failed to set locale, do you have locale en_US.UTF-8 installed?', exc_info=True)
os._exit(-1)
try:
timeObj=datetime.datetime.strptime(re.search(timeRegex,timeString).group(), '%a %b %d %H:%M:%S %Z %Y')
timeObj=timeObj.replace(tzinfo=tz.gettz(re.search(timeRegex,timeString).group().split(" ")[4]))
locale.setlocale(locale.LC_ALL,act_loc)
return timeObj
except Exception, e:
            logger.warning("Failed to parse timezone from '%s', got '%s'. Setting it to UTC, this may be wrong."
%(re.search(timeRegex,timeString).group(),
re.search(timeRegex,timeString).group().split(" ")[4]), exc_info=True)
timeObj=timeObj.replace(tzinfo=tz.gettz('UTC'))
locale.setlocale(locale.LC_ALL,act_loc)
return timeObj
else:
logger.error("Can't find time in '%s'." %(timeString))
os._exit(-1)
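# A minimal sketch of the timestamp format find_time() expects (hypothetical log line):
#   find_time("Tue Jul 28 22:38:25 UTC 2015 : Start Test", logger)
#   -> a timezone-aware datetime for 2015-07-28 22:38:25 UTC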
# converts timestring to timeobject
# checks if value is already set (should not happen that a value is found twice)
def process_time(line,key,dict,logger):
if key in dict.keys():
logger.error("Found two %s, this should not happen!" %(key))
os._exit(-1)
dict[key] = find_time(line,logger)
# converts spacestring
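# e.g. a hypothetical line "SPACE: 1024 2048 4096" yields
# spaceBegin="1024", spaceBetween="2048", spaceEnd="4096"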
def process_space(line, dict,logger):
spaceSplitters = line.replace("SPACE: ","").split(" ")
if len(spaceSplitters) == 3:
dict["spaceBegin"] = spaceSplitters[0]
dict["spaceBetween"] = spaceSplitters[1]
dict["spaceEnd"] = spaceSplitters[2]
else:
logger.error("Error while processing space string '%s'" %(line))
os._exit(-1)
# converts float (throughput, runtime)
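# e.g. a hypothetical YCSB-style line "[OVERALL], Throughput(ops/sec), 1234.5"
# stores float(1234.5) in dict under the given key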
def process_float(line, dict, key, logger):
splitters = line.split(", ")
if len(splitters) == 3:
dict[key] = float(splitters[2])
else:
logger.error("Error while processing float string '%s'" %(line))
os._exit(-1)
# checks if we have a line according to one of the known blocks
def check_block(line, knownBlocktypes):
for type in knownBlocktypes:
if re.search("^\[%s\]" %(type),line) != None:
return True
return False
# parses one actual line (=value) into a existing block dict
# blockDict = dict which contains values of the actual block (block ex. READ)
# two typical ycsb lines as example:
# [READ], 99thPercentileLatency(us), 4
# [READ], 0, 397
# this translates as follows to the arguments:
# [blockName], name, value
# rewrote in 0.02 for using lists in some steps instead of dicts, brings some speedup!
def parse_block(blockDict, blockName, name, value, timeSeries, logger):
# save every known name + conversion method
knownValues={"Operations": int,
"AverageLatency(us)" : float,
"MinLatency(us)" : int,
"MaxLatency(us)" : int,
"95thPercentileLatency(us)": int,
"99thPercentileLatency(us)" : int}
if name in knownValues.keys():
# Check if value was already found in this block, should not happen!
if name not in blockDict.keys():
try:
blockDict[name] = knownValues[name](value)
except ValueError:
                logger.error("Error during conversion in block '%s' with value '%s'." %(blockName, value))
os._exit(-1)
else:
logger.error("Found another '%s' value in block '%s' with value '%s'. Should not happen." %(name, blockName, value))
os._exit(-1)
elif "Return=" in name:
# Return Value looks like this name: Return=0, value: 123
if "Return" not in blockDict.keys():
blockDict["Return"] = []
try:
blockDict["Return"].append([int(name.replace("Return=","")),int(value)])
except ValueError:
            logger.error("Error during conversion in block '%s' with value '%s'." %(blockName, value))
os._exit(-1)
else:
# here we should have only histogram or timeseries values
# ex. [READ], 0, 397
# ex. [READ], 1, 101
        # the last value probably has a ">" in its name, ignore that.
try:
if "LatencyList" not in blockDict.keys():
blockDict["LatencyList"] = []
if timeSeries:
blockDict["LatencyList"].append(float(value)/1000.0)
else:
blockDict["LatencyList"].append(int(value))
except ValueError:
            logger.error("Error during conversion in block '%s' with value '%s' and name '%s'." %(blockName, value, name))
os._exit(-1)
except:
            logger.error("Unknown error occurred during conversion in block '%s' with value '%s' and name '%s'. Maybe an unknown block value?" %(blockName, value, name))
os._exit(-1)
# processes one actual known block
# look below at process_file(..) for more explanation (data structure,..)
# actBlock should be mutable, but Strings are immutable, so actBlock should be a list with a string in it.. ;)
# rewrote in 0.02 for using lists in some steps instead of dicts, brings some speedup!
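# A minimal sketch (hypothetical YCSB line), assuming an empty fileDict and a fresh actBlock;
# the third argument is the list of known block types, it is only passed through here:
#   fileDict, actBlock = {}, [""]
#   process_block(fileDict, "[READ], Operations, 511", ["READ"], actBlock, False, logger)
#   # actBlock is now ["READ"] and fileDict["blocks"] == {"READ": [{"Operations": 511}]}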
def process_block(fileDict, line, blockIndices, actBlock, timeSeries, logger):
splitters = line.split(", ")
if len(splitters) == 3:
# check if it is the first block we ever encounter or if we are at a new block
blockName=splitters[0].replace("[","").replace("]","")
if actBlock[0] == "" or blockName != actBlock[0]:
# save that we are in a new block
actBlock[0] = blockName
# check if fileDict contains the blocks dict
if "blocks" not in fileDict.keys():
fileDict["blocks"] = {}
            # check if dict already knows it
if blockName not in fileDict["blocks"].keys():
fileDict["blocks"][blockName] = []
# fileDict["blocks"][blockName] = {}
# search for new index if types of this block already exist
newIndex=0
if len(fileDict["blocks"][blockName]) > 0:
newIndex = len(fileDict["blocks"][blockName])
fileDict["blocks"][blockName].append({})
parse_block(fileDict["blocks"][blockName][newIndex], blockName, splitters[1], splitters[2], timeSeries, logger)
else:
# okay we just have to add some value to this already seen block
parse_block(fileDict["blocks"][blockName][-1], blockName, splitters[1], splitters[2], timeSeries, logger)
else:
logger.error("Error while processing line '%s'" %(line))
os._exit(-1)
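# A minimal usage sketch (hypothetical file names, matching the naming scheme parsed below):
#   fileDict = {}
#   process_file("ycsb_mysql_workloada_201507282238.log", False, fileDict,
#                "ycsb_mysql_workloada_201507282238.ydc", logger)
#   # fileDict["dbName"] == "mysql", fileDict["workload"] == "workloada"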
def process_file(filename, timeSeries, fileDict, compressedFileName, logger):
if not Util.check_file_readable(filename):
logger.error("Can't open %s." % (filename))
return
file = open(filename,"r")
actBlock=[""] # saves in which type of block we are, must be mutable!
knownBlocktypes=["CLEANUP", "INSERT", "READ", "UPDATE", "SCAN", "AVG", "SUM", "COUNT"] # saves which types of blocks we know
# fileDict must be mutable!
# the following keys can exist
# dbName -> name of dbms (mysql e.q.)
# dbDesc -> description of dbms (mysql e.q.)
# description -> description of the workload
# errors -> [] (errors occured)
# warnings -> [] (warnings occured)
# exceptions -> [] (exceptions occured)
# workload -> name of workload (workloada e.g.)
# startTime -> overall start time
# endTime -> overall end time
# startRunTime -> RUN phase start time
# endRunTime -> RUN phase end time
# startLoadTime -> LOAD phase start time
# endLoadTime -> LOAD phase end time
# spaceBegin -> Space of DB folder before any workload (before LOAD phase)
# spaceBetween -> Space of DB folder between LOAD and RUN phase
# spaceEnd -> Space of DB folder after RUN phase
# runtimeLoad -> runtime (ycsb) of LOAD phase
# runtimeRun -> runtime (ycsb) of RUN phase
    # throughputLoad -> throughput (ycsb) of LOAD phase
# throughputRun -> throughput (ycsb) of RUN phase
# blocks -> dict of blocks (ycsb)
# filecontent -> content of original ycsb file (ycsb) -> Dropped with 0.02!
# timeseries -> true/false -> generate ts output (arguments can overwrite it)
# granularity -> integer for ts granularity
# bucket -> integer for histogram buckets
# (ycsb) means that this is measured by ycsb
# blocks itself looks like:
# {"CLEANUP": {}, "INSERT": {}, "READ": {}, "UPDATE": {}}
# the embedded dicts look like this (ex. for INSERT)
# {0 : {}, 1 : {}} # 0 = first encountered INSERT block, 1 = second encountered INSERT block ...
# {} contains the actual values of one block
# ex. one READ Block (latencies are truncated)
# [READ], Operations, 511
# [READ], AverageLatency(us), 860.4833659491194
# [READ], MinLatency(us), 404
# [READ], MaxLatency(us), 14309
# [READ], 95thPercentileLatency(us), 1
# [READ], 99thPercentileLatency(us), 4
# [READ], Return=0, 511
# [READ], 0, 397
# [READ], 1, 101
# ...
# the dict would be.: {"Operations" : 511, "AverageLatency(us)" : 860.4833659491194, "MinLatency(us)": 404, "MaxLatency(us)" : 14309 ,"95thPercentileLatency(us)" : 1, "99thPercentileLatency(us)" : 4, "Return" : [0,511] , "LatencyList" : {0 : 397, 1 : 101, ,...} }
# look at https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload how to interpret the ycsb values
    # in case timeseries instead of histogram is used, there will be floats instead of integers as values (ex. [READ], 1, 101.11)
# split filename to get db & workload name
# e.g. ycsb_mysql_workloada_201507282238.log
fileNameSplitters=filename.split("_")
if len(fileNameSplitters) >= 4:
fileDict["dbName"]=fileNameSplitters[1]
for splitter in fileNameSplitters[2:len(fileNameSplitters)-2]:
fileDict["dbName"]+="_%s" %(splitter)
fileDict["workload"]=fileNameSplitters[len(fileNameSplitters)-2]
else:
logger.error("Can't parse filename '%s'." %(filename))
os._exit(-1)
fileDict["errors"] = [];
fileDict["warnings"] = [];
fileDict["exceptions"] = [];
fileDict["description"] = "";
# process lines
# Using the blockwise list(islice(..)) version, as it is a little bit faster than pure 'for line in file:'
while True:
lines = list(islice(file, 100))
if not lines:
break
for line in lines:
if re.search("Start Test$",line) != None:
# Starttime whole measurement
process_time(line,"startTime", fileDict, logger)
elif re.search("error",line.lower()) != None:
fileDict["errors"].append(line)
elif re.search("warn",line.lower()) != None:
fileDict["warnings"].append(line)
elif re.search("exception",line.lower()) != None:
fileDict["exceptions"].append(line)
elif re.search("^\[DESCRIPTION\]",line) != None:
fileDict["description"] = line.replace("[DESCRIPTION],","")
if fileDict["description"][0] == " ":
fileDict["description"] = fileDict["description"][1:]
continue
elif re.search("^DESCRIPTION",line) != None:
try:
fileDict["dbDesc"] = line.split("DESCRIPTION: ")[1]
except Exception, e:
logger.warning("Couldn't process DESCRIPTION line '%s', ignoring it." %(line), exc_info=True)
fileDict["dbDesc"] = ""
elif re.search("Start Load$",line) != None:
# Starttime LOAD phase
process_time(line,"startLoadTime", fileDict, logger)
elif re.search("End Load$",line) != None:
# Endtime LOAD phase
process_time(line,"endLoadTime", fileDict, logger)
elif re.search("Start Run$",line) != None:
# Starttime RUN phase
process_time(line,"startRunTime", fileDict, logger)
elif re.search("End Run$",line) != None:
# Endtime RUN phase
process_time(line,"endRunTime", fileDict, logger)
elif re.search("End Test$",line) != None:
# Endtime whole measurement
process_time(line,"endTime", fileDict, logger)
elif re.search("^SPACE:",line) != None:
# found line with space
process_space(line, fileDict, logger)
elif re.search("^TIMESERIES",line) != None:
# if Timeseries is set or unset
if re.search("1$",line) != None:
fileDict["timeseries"] = True
timeSeries = True
elif re.search("0$",line) != None:
fileDict["timeseries"] = False
else:
logger.warning("Couldn't process TIMESERIES line '%s', ignoring it." %(line))
elif re.search("^GRANULARITY",line) != None:
# Granularity for ts
try:
fileDict["granularity"] = int(line.split("GRANULARITY: ")[1])
except Exception, e:
logger.warning("Couldn't process GRANULARITY line '%s', ignoring it." %(line), exc_info=True)
elif re.search("^BUCKET",line) != None:
# histogram Buckets
try:
fileDict["bucket"] = int(line.split("BUCKET: ")[1])
except Exception, e:
logger.warning("Couldn't process BUCKET line '%s', ignoring it." %(line), exc_info=True)
elif re.search("^\[OVERALL\]",line) != None:
if "RunTime" in line:
if "runtimeLoad" in fileDict.keys():
# runtimeLoad was found, now it has to be Run
process_float(line, fileDict, "runtimeRun", logger)
elif "runtimeRun" in fileDict.keys():
# both already found, third one should not happen
logger.error("Found third runTime in '%s'." %(line))
os._exit(-1)
else:
# nothing set, must be Load phase
process_float(line, fileDict, "runtimeLoad", logger)
elif "Throughput" in line:
if "throughputLoad" in fileDict.keys():
# throughputLoad was found, now it has to be Run
process_float(line, fileDict, "throughputRun", logger)
elif "throughputRun" in fileDict.keys():
# both already found, third one should not happen
logger.error("Found third throughput in '%s'." %(line))
os._exit(-1)
else:
# nothing set, must be Load phase
process_float(line, fileDict, "throughputLoad", logger)
else:
                    logger.error("Did not find 'RunTime' nor 'Throughput' in '%s'." %(line))
                    os._exit(-1)
elif check_block(line, knownBlocktypes):
# check if fileDict contains the blocks dict
process_block(fileDict, line, knownBlocktypes, actBlock, timeSeries, logger)
## AVG,SUM, usw.
for blockKey in fileDict["blocks"].keys():
## AVG0,SUM0, usw.
for listBlock in fileDict["blocks"][blockKey]:
if "LatencyList" not in listBlock.keys():
                logger.error("The 'LatencyList' is missing in block %s (%s) in %s." %(blockKey,listBlock,filename))
os._exit(-1)
# "0"-"999" + ">1000" = 1001 Entrys for 1000 buckets for ex.
if len(listBlock["LatencyList"]) != int(fileDict["bucket"])+1:
logger.error("There are buckets missing for %s%s in %s. Available Buckets: %s, configured amount of buckets: %s(+1)." %(blockKey,listBlock,filename,len(listBlock["LatencyList"]),int(fileDict["bucket"])))
os._exit(-1)
try:
file = gzip.open(compressedFileName,"w")
cPickle.dump(fileDict,file)
file.flush()
file.close()
except Exception, e:
logger.error("Can't open '%s' to write. Is it writable?" %(compressedFileName), exc_info=True)
os._exit(-1)
return fileDict
# replaces bokeh.charts.Bar because it can't do logarithmic scale
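# A minimal input sketch (hypothetical values), matching how the callers below build their data:
#   dataDict = {"mysql": [1.2, 0.4, 3.0], "postgres": [0.9, 0.5, 2.1]}  # one value list per series
#   cat = ["Avg.", "Min", "Max"]                                        # one label per list entry
#   p = generate_bar_plot(dataDict, cat, "top_left", "horizontal", "Block results:",
#                         "Latency in ms", "Type of latency", 650, 350, logger)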
def generate_bar_plot(dataDict, cat, legendPos, legendOri, title, ylabel, xlabel, width, height, logger):
maxValue=0
for dataKey in dataDict.keys():
for dataPoint in dataDict[dataKey]:
if dataPoint >= maxValue:
try:
dataPointStr = str(dataPoint)
if "." in dataPointStr:
dataPointStr = dataPointStr.split(".")[0]
maxValue = 10**len(dataPointStr)
except:
logger.error("Can't convert '%s' to string. Can't generate bar plot." %(dataPoint))
return None
if maxValue == 0:
logger.error("Maximum value is 0. Can't generate bar plot.")
return None
p = bokeh.plotting.figure(title=title, y_axis_type="log", x_range=cat, y_range=[0,maxValue], width=width, height=height)
dataLen = -1
for dataKey in dataDict.keys():
if dataLen == -1:
dataLen = len(dataDict[dataKey])
else:
if dataLen != len(dataDict[dataKey]):
logger.error("Some dataLists in dataDict have different lengths. Can't generate bar plot.")
return None
if dataLen == -1:
logger.error("Can't find list length. Can't generate bar plot.")
return None
keyLen = float(len(dataDict.keys()))
groupSep = min(max(0.0015,1.0/dataLen),0.1)
barSep = min(max(0.00015,1.0/keyLen),0.025)
barStart = (groupSep - (barSep*(keyLen/2.0)) + barSep)/2.0
barWidth = max((1.0-groupSep-(barSep*keyLen))/keyLen,0.005)
#groupSep = 0.2
defaultColors=[66,88,104,32,131,143,40,92,41,45]
# colors that are hard to see against white (see http://www.w3schools.com/cssref/css_colornames.asp)
chosenColors=[122,48,144,142,109,24,107,55,126,90,91,36,112,76,133,103,130,128,132,94,46,6,58,14,146,70,23,28,96,10,20,99,80,113,31,137]
for dataKey in sorted(dataDict.keys()):
left=[]
right=[]
top=[]
bottom=[]
color = Util.get_random_int_with_chosen_default(0, len(webcolors.css3_names_to_hex.keys())-1,chosenColors,defaultColors)
if color == None:
logger.error("Not enough colors. Can't generate bar plot.")
return None
chosenColors.append(color)
# for dataPoint in dataDict[dataKey][::-1]:
for dataPoint in dataDict[dataKey]:
if len(right) != len(left) != len(top):
logger.error("Some error occured. Can't generate bar plot.")
return None
counter=len(left)+1-((barWidth*keyLen)+groupSep+(barSep*(keyLen/2.0)))/2.0
left.append(counter+barStart)
right.append(counter+barStart+barWidth)
top.append(dataPoint)
bottom.append(0)
logger.info("%s %s %s %s %s %s %s %s %s" %(dataKey, barStart, barWidth, dataLen, counter, groupSep, barSep, left,right))
#p.quad(bottom=bottom, top=top, left=left, right=right, color=webcolors.css3_names_to_hex[webcolors.css3_names_to_hex.keys()[color]], legend="%s_%s"%(dataKey,counter2))
# There is an error on Bokeh in the line above: If any of the top values is zero, alle bars/quads will be drawn as zero. Drawing them non-grouped fixes this.
# This is independent from the set y-range (-1 or 0 does not effect this)
# See https://github.com/bokeh/bokeh/issues/3022
for i in range(0,len(top)):
p.quad(bottom=bottom[i], top=top[i], left=left[i], right=right[i], color=webcolors.css3_names_to_hex[webcolors.css3_names_to_hex.keys()[color]], legend=dataKey)
barStart+= barWidth + barSep
p.xaxis.axis_label_text_font_size = "10pt"
p.yaxis.axis_label_text_font_size = "10pt"
p.yaxis.axis_label = ylabel
p.xaxis.axis_label = xlabel
#p.y_range = (0, 10000)
p.legend.location = legendPos
p.legend.orientation = legendOri
return p
# generates plots for every block/index in dict
# Only for one measurement -> "single"
def generate_plot_single(dict, timeseries, logger, tick=1000):
dataDict = { "histogram" : {}, "general" : {} }
gridplotList = []
gridRowCount = 0
gridRowMax = 2
# Get Data for both plots
if args.debug:
Util.log_timestamp("Start Generating histogramm data",logger)
for block in dict["blocks"].keys():
counter=0
for blockList in dict["blocks"][block]:
dataDict["histogram"]["%s%s" %(block,counter)] = generate_histogram_data(blockList["LatencyList"], logger)
counter += 1
if args.debug:
Util.log_timestamp("Start Generating general data",logger)
dataDict["general"] = generate_general_data(dict["blocks"], True, False, logger)
if "TIMESERIES" in dataDict.keys() and dataDict["TIMESERIES"]:
timeseries = True
if "GRANULARITY" in dataDict.keys():
tick = dataDict["GRANULARITY"]
elif timeseries:
try:
tick = sorted(dataDict["histogram"][dataDict["histogram"].keys()[0]]["cat"])[1]-sorted(dataDict["histogram"][dataDict["histogram"].keys()[0]]["cat"])[0]
except Exception, e:
            logger.warning("Couldn't process GRANULARITY on the basis of histogram data. Defaulting to 1000.", exc_info=True)
tick=1000
# generate General block plot
#p = bokeh.charts.Bar(dataDict["general"]["data"], cat=dataDict["general"]["cat"], legend="top_right", title="Block results:",
# ylabel='Latency in ms', xlabel = "Type of latency", width=650, height=350)
if args.debug:
Util.log_timestamp("Start Generating bar plot",logger)
p = generate_bar_plot(dataDict["general"]["data"], dataDict["general"]["cat"], "top_left", "horizontal", "Block results:",
"Latency in ms", "Type of latency", 650, 350, logger)
if args.debug:
Util.log_timestamp("End Generating bar plot",logger)
if p == None:
return None
if gridRowMax < 2:
gridplotList.append(p)
elif gridRowCount == gridRowMax-1:
gridplotList[-1].append(p)
gridRowCount=0
else:
gridplotList.append([p])
gridRowCount+=1
if args.debug:
Util.log_timestamp("Start Generating histograms",logger)
# Generate histograms
for key in dataDict["histogram"].keys():
if len(dataDict["histogram"][key]["data"]) < 2 and \
len(dataDict["histogram"][key]["data2"]) < 2 and \
len(dataDict["histogram"][key]["cat"]) < 2:
if "CLEANUP" in key and timeseries:
logger.info("Do not produce '%s' plots since timeseries is active and therefore only 1 value is given." %(key))
else:
logger.warning("Only 1 value for '%s', can't produce plots for this block." %(key))
continue
p = bokeh.plotting.figure(title="Query time for %s" %(key),
x_range=[0,max(dataDict["histogram"][key]["cat"])], y_range=[0, max(dataDict["histogram"][key]["data"])+max(dataDict["histogram"][key]["data"])/10.0],
plot_width=650, plot_height=350)
p.xaxis.axis_label_text_font_size = "10pt"
p.yaxis.axis_label_text_font_size = "10pt"
if timeseries:
p.yaxis.axis_label = "Avg. Latency since last tick (every %s ms) in ms" %(tick)
p.xaxis.axis_label = "Elapsed time in ms"
p.xaxis[0].ticker = bokeh.models.SingleIntervalTicker(interval=tick)
else:
p.xaxis.axis_label = "Time in ms"
p.yaxis.axis_label = "Amount of queries completed"
color = defaultPlotColor
if key in plotColorDict.keys():
color = plotColorDict[key]
if timeseries:
sortedCatList, sortedDataList = (list(t) for t in zip(*sorted(zip(dataDict["histogram"][key]["cat"], dataDict["histogram"][key]["data"]))))
p.line(sortedCatList, sortedDataList, line_width=2)
p.circle(dataDict["histogram"][key]["cat"], dataDict["histogram"][key]["data"], fill_color="white", size=8)
else:
p.rect(x=dataDict["histogram"][key]["cat"], y=dataDict["histogram"][key]["data2"], width=0.8,
height=dataDict["histogram"][key]["data"], color=color, alpha=1)
if gridRowMax < 2:
gridplotList.append(p)
continue
if gridRowCount == gridRowMax-1:
gridplotList[-1].append(p)
gridRowCount=0
else:
gridplotList.append([p])
gridRowCount+=1
if args.debug:
Util.log_timestamp("End Generating histograms",logger)
if args.debug:
Util.log_timestamp("Start adding plots to bokeh",logger)
p = bokeh.io.gridplot(gridplotList)
if args.debug:
Util.log_timestamp("End adding plots to bokeh",logger)
return p
# generates plots for every block/index and every dict in dicts
# Only for more than one measurement -> "multi"
# no histogram in combined plots
# only comparison plots/tables
def generate_plot_multi(dicts, timeseries, logger, tick=1000):
gridplotList = []
dataDictBlocks = {}
# structure of dataDictBlocks should look like this:
# <blocktype>
# -> "data"
# -> <dbName>
# -> [1,2,3]
# ...
# ...
# -> "cat"
# -> <paramName>
# -> ...
# <blockType> e.g. "INSERT0"
# <dbName> e.g. "MySQL"
# <paramName> e.g. "AvgLatency"
# getting blocktypes and data
generate_block_data(dataDictBlocks, dicts, True, False, logger)
    # generating general graphs like avgLatency etc.
for blockKey in dataDictBlocks.keys():
#p = bokeh.charts.Bar(dataDictBlocks[blockKey]["data"], cat = dataDictBlocks[blockKey]["cat"], legend = "top_right",
# title = "Results for block %s:" % (blockKey), ylabel = 'Latency in ms', xlabel = "Type of latency", width=1300, height=700)
# dataDictBlocks[blockKey]["cat"]=['Avg.', '95Perc.', '99Perc.', 'Min', 'Max',]
# p = generate_bar_plot(dataDictBlocks[blockKey]["data"], dataDictBlocks[blockKey]["cat"][::-1],
p = generate_bar_plot(dataDictBlocks[blockKey]["data"], dataDictBlocks[blockKey]["cat"],
"top_left", "horizontal", "Results for block %s:" % (blockKey),
"Latency in ms", "Type of latency", 1300, 700, logger)
if p != None:
gridplotList.append([p])
else:
            logger.error("An error occurred while generating plot for %s." %(blockKey))
# generating graphs for runtimeRun/Load
runtimeDict = { "data" : {}, "cat" : [ "runtimeLoad", "runtimeRun" ] }
for key in dicts.keys():
if "runtimeLoad" in dicts[key].keys() and "runtimeRun" in dicts[key].keys():
runtimeDict["data"][key]=[dicts[key]["runtimeLoad"],dicts[key]["runtimeRun"]]
else:
logger.error("Can't find 'runtimeLoad' or/and 'runtimeRun' in %s dict. Can't go on." %(key))
os._exit(-1)
# p = bokeh.charts.Bar(runtimeDict["data"], cat = runtimeDict["cat"], legend = "top_right",
# title = "Results for runtime:", ylabel = 'Runtime in ms', xlabel = "Type of runtime", width=1300, height=700)
# gridplotList.append([p])
p = generate_bar_plot(runtimeDict["data"], runtimeDict["cat"],
"top_left", "horizontal", "Results for runtime:",
"Runtime in ms", "Type of runtime", 1300, 700, logger)
if p != None:
gridplotList.append([p])
else:
        logger.error("An error occurred while generating plot for runtime.")
# generating graphs for throughputLoad/Run
runtimeDict = { "data" : {}, "cat" : [ "throughputLoad", "throughputRun" ] }
for key in dicts.keys():
if "throughputLoad" in dicts[key].keys() and "throughputRun" in dicts[key].keys():
runtimeDict["data"][key]=[dicts[key]["throughputLoad"],dicts[key]["throughputRun"]]
else:
logger.error("Can't find 'throughputLoad' or/and 'throughputRun' in %s dict. Can't go on." %(key))
os._exit(-1)
# p = bokeh.charts.Bar(runtimeDict["data"], cat = runtimeDict["cat"], legend = "top_right",
# title = "Results for throughput:", ylabel = 'Throughput in operations per sec.', xlabel = "Type of throughput", width=1300, height=700)
# gridplotList.append([p])
p = generate_bar_plot(runtimeDict["data"], runtimeDict["cat"],
"top_left", "horizontal", "Results for throughput:",
"Throughput in operations per sec.", "Type of throughput", 1300, 700, logger)
if p != None:
gridplotList.append([p])
else:
        logger.error("An error occurred while generating plot for throughput.")
# generating graphs for spaceLoad/spaceRun:
# spaceLoad = spaceBetween - spaceBegin
# spaceRun = spaceEnd - spaceBetween
runtimeDict = { "data" : {}, "cat" : [ "spaceLoad", "spaceRun" ] }
for key in dicts.keys():
if "spaceBegin" in dicts[key].keys() and "spaceEnd" in dicts[key].keys() and "spaceBetween" in dicts[key].keys():
try:
runtimeDict["data"][key]=[int(dicts[key]["spaceBetween"]) - int(dicts[key]["spaceBegin"]), int(dicts[key]["spaceEnd"]) - int(dicts[key]["spaceBetween"])]
except:
logger.error("Error while converting 'spaceBegin' '%s' or/and 'spaceBetween' '%s' or/and 'spaceEnd' '%s' in %s dict to int. Can't go on." %(dicts[key]["spaceBegin"], dicts[key]["spaceBetween"], dicts[key]["spaceEnd"], key))
os._exit(-1)
else:
logger.error("Can't find 'spaceBegin' or/and 'spaceBetween' or/and 'spaceEnd' in %s dict. Can't go on." %(key))
os._exit(-1)
# p = bokeh.charts.Bar(runtimeDict["data"], cat = runtimeDict["cat"], legend = "top_right",
# title = "Results for space consumption:", ylabel = 'Space consumption in Kilobyte (kB)', xlabel = "Type of space consumption", width=1300, height=700)
# gridplotList.append([p])
p = generate_bar_plot(runtimeDict["data"], runtimeDict["cat"],
"top_left", "horizontal", "Results for space consumption:",
"Space consumption in Kilobyte (kB)", "Type of space consumption", 1300, 700, logger)
if p != None:
gridplotList.append([p])
else:
        logger.error("An error occurred while generating plot for space consumption.")
p = bokeh.io.gridplot(gridplotList)
return p
# generates block data for multi dict block graphs
# in dataDictBlocks the following structure is found after returning:
# <blocktype>
# -> "data"
# -> <dbName>
# -> [1,2,3]
# ...
# ...
# -> "cat"
# -> <paramName>
# -> ...
# <blockType> e.g. "INSERT0"
# <dbName> e.g. "MySQL"
# <paramName> e.g. "AvgLatency"
def generate_block_data(dataDictBlocks, dicts, ignoreSomeParameters, ignoreLess, logger):
firstRound = True # check if everyone has same block types
for key in dicts.keys():
        # generate_general_data delivers the following structure:
# "data"
# -> <blockType>
# -> <parameters>
# "cat"
# -> <paramName>
# -> ...
# we have to move that a little bit...
data = generate_general_data(dicts[key]["blocks"], ignoreSomeParameters, ignoreLess, logger)
keyCopyTmp = list(dataDictBlocks.keys()) # True Copy # check if every block is in every dict
for block in dicts[key]["blocks"].keys():
for index in range(0,len(dicts[key]["blocks"][block])):
blockname = "%s%s" % (block, index)
if firstRound and blockname not in dataDictBlocks.keys():
dataDictBlocks[blockname] = {"data": {}, "cat": []}
if dataDictBlocks[blockname]["cat"] == []:
dataDictBlocks[blockname]["cat"] = data["cat"]
elif blockname not in dataDictBlocks.keys():
logger.error(
"Found blocktype '%s' (index '%s') that does only belong to dict '%s'. Can't move on." % (
block, index, key))
os._exit(-1)
else:
keyCopyTmp.remove(blockname)
if key not in dataDictBlocks[blockname]["data"].keys():
dataDictBlocks[blockname]["data"][key] = data["data"][blockname]
else:
logger.error("Found key '%s' more than once for block '%s', index '%s'. Can't move on." % (
key, block, index))
os._exit(-1)
# check if the right amount of parameters is there
if len(dataDictBlocks[blockname]["data"][key]) != len(dataDictBlocks[blockname]["cat"]) and not ignoreLess:
logger.error("Found more or less parameters than needed in key '%s'. Needed: %s, Found: %s." % (
key, len(dataDictBlocks[blockname]["cat"]), len(dataDictBlocks[blockname]["data"][key])))
os._exit(-1)
if not firstRound:
if len(keyCopyTmp) > 0:
logger.error("Found less keys than needed in '%s'. Needed: '%s'." % (key, keyCopyTmp))
os._exit(-1)
firstRound = False
# generates html file for given html text
def generate_html(p, templateFile, templateDict, outputFile, overwrite, logger):
if not Util.check_file_exists(templateFile):
logger.error("Template file does not exist: '%s'" %(templateFile))
os._exit(-1)
try:
template = jinja2.Environment(loader = jinja2.FileSystemLoader(searchpath=os.path.split(templateFile)[0])).get_template(os.path.split(templateFile)[1])
except Exception, e:
logger.error("Failed load template file '%s'" %(templateFile), exc_info=True)
os._exit(-1)
html = bokeh.embed.file_html(models=p, resources=bokeh.resources.INLINE, title=templateDict["title"] , template=template, template_variables=templateDict)
if Util.check_file_exists(outputFile) and not overwrite:
logger.error("Html file does exist: '%s'. Delete or use overwrite flag." %(outputFile))
os._exit(-1)
try:
file = open(outputFile,"w")
file.write(html)
file.close()
except Exception, e:
logger.error("Error while writing html file '%s'" %(outputFile), exc_info=True)
os._exit(-1)
# generates bokeh histogram_data
# gets data from every "LatencyList"
# data2 is just data/2.0
# commented out code is old and better to read but much slower due to "key not in" - if
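# A minimal sketch (hypothetical bucket counts from a histogram run):
#   generate_histogram_data([397, 101, 3], logger)
#   -> {"data": [397, 101, 3], "data2": [198.5, 50.5, 1.5], "cat": [0, 1, 2]}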
def generate_histogram_data(list, logger):
if args.debug:
Util.log_timestamp("Start Generating histogram",logger)
dataDict = { "data" : [], "data2" : [], "cat" : []}
    # consists of dicts: data and cat
# counter=0
dataDict["data"]=list
dataDict["data2"]=[i/2.0 for i in list]
dataDict["cat"]=range(0,len(list))
# for value in list:
# # factor 2 has to be divided as you set a "center point" for your rectangles, otherwise 0 won't be 0
# dataDict["data"].append(value)
# dataDict["data2"].append(value/2.0)
# if key not in dataDict["cat"]:
# Util.log_timestamp(counter,logger)
# dataDict["cat"].append(counter)
# counter += 1
if args.debug:
Util.log_timestamp("End Generating histogram",logger)
return dataDict
# generates bokeh general block data
# gets the "blocks" key of fileDict (see above method process_file(...))
# use data from every block in blocks, from every index in every block
# gets every "Throughput","AverageLatency",...
# if you set param you only get your chosen value like Throughput e.g.
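# A minimal sketch of the returned shape (hypothetical values; key order follows dict iteration):
#   {"data": {"READ0": [0.404, 14.3, ...], "INSERT0": [...]},  # one cleaned value list per block+index
#    "cat":  ["Min", "Max", ...]}                              # cleaned parameter names shared by all lists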
def generate_general_data(dict, ignoreSomeParameters, ignoreLess, logger):
dataDict = { "data" : {}, "cat" : [] }
firstRun = True
for block in dict.keys():
for counter in range(0,len(dict[block])):
dataDict["data"]["%s%s" % (block,counter)] = []
parameterArrayCopy = list(dataDict["cat"]) # real copy, not just reference!
for parameter in possibleMissingParams:
if parameter not in dict[block][counter].keys():
                    logger.warning("Possibly all queries of %s ran longer than the maximum time measurement period? %s will be -1 for %s." %("%s%s" %(block,counter),parameter, "%s%s" %(block,counter)))
dict[block][counter][parameter]=-1
for parameter in dict[block][counter].keys():
parameterClean = parameter.replace("(ms)","").replace("(us)","").replace("Latency","").replace("thPercentile","Perc.").replace("Average","Avg.")
if parameter not in ignoreParams or (not ignoreSomeParameters and parameter != "LatencyList" ):
if not firstRun and parameterClean not in dataDict["cat"]:
                        logger.error("There were more parameters in '%s' than in other blocks, that does not work. Parameter %s is extra." %("%s%s" % (block,counter),parameter))
os._exit(-1)
if parameter in convertFromUsToMs:
dataDict["data"]["%s%s" % (block,counter)].append(dict[block][counter][parameter]/1000.0)
else:
dataDict["data"]["%s%s" % (block,counter)].append(dict[block][counter][parameter])
if firstRun:
dataDict["cat"].append(parameterClean)
else:
parameterArrayCopy.remove(parameterClean)
if not firstRun:
if len(parameterArrayCopy) > 0:
if not ignoreLess:
                        logger.error("There were fewer parameters in '%s' than in other blocks, that does not work. Parameters left (cleaned -> without (us) or (ms)!): %s." %("%s%s" % (block,counter),parameterArrayCopy))
os._exit(-1)
else:
for param in parameterArrayCopy:
dataDict["data"]["%s%s" % (block,counter)].insert(list(dataDict["cat"]).index(param),"-")
firstRun = False
return dataDict
# Generate resulting html table for single measurement page
def generate_results_table_single(dict, logger):
templateDict={}
templateDict["results_table_name"] = "General results:"
templateDict["block_results_table_name"] = "Block results:"
if dict["dbDesc"] != "":
templateDict["dbdesc_name"] = "Database Description:"
templateDict["dbdesc"] = dict["dbDesc"]
if dict["description"] != "":
templateDict["description_name"] = "Description:"
templateDict["description"] = dict["description"]
if dict["errors"] != []:
        templateDict["errors_name"] = "Errors:"
templateDict["errors"] = ""
for error in dict["errors"]:
templateDict["errors"] += "%s<br>" %(error)
if dict["warnings"] != []:
templateDict["warnings_name"] = "Warnings:"
templateDict["warnings"] = ""
for warning in dict["warnings"]:
templateDict["warnings"] += "%s<br>" %(warning)
if dict["exceptions"] != []:
templateDict["exceptions_name"] = "Exceptions:"
templateDict["exceptions"] = ""
for exception in dict["exceptions"]:
templateDict["exceptions"] += "%s<br>" %(exception)
templateDict["title"] = "%s_%s_%s" %(dict["dbName"], dict["workload"], dict["startTime"].strftime("%Y%m%d%H%M"))
# Generate 'General' Results Table
tableHtml="<thead><tr><th>Parametername:</th><th>Value:</th></tr></thead>"
tableHtml+="<tbody>"
for key in dict.keys():
if key != "blocks":
tableHtml+="<tr><th scope=\"row\">%s</th><td>%s</td></tr>" % (key,dict[key])
tableHtml+="</tbody>"
templateDict["results_table"] = tableHtml
# Generate 'Block' Results Table
# which means results of every block
# no histogram/timeseries data ('LatencyList'), only more general values like throughput,etc.
paramDict={} # dict of parameterName : [value1,value2..] -> represents table rows!
firstRun=True # check if all blocks have all parameters (same parameter set)
# creates dict for html table, every key has an array of one row
for block in dict["blocks"]:
for index in range(0,len(dict["blocks"][block])):
tmpparamDict = paramDict.copy() #check if all entries were there, if not use "-"
for param in dict["blocks"][block][index]:
if param == "LatencyList":
continue
if param not in paramDict.keys():
if not firstRun:
logger.error("Found '%s' in '%s%s' which other blocks do not have." %(param,block,index))
os._exit(-1)
paramDict[param]=[dict["blocks"][block][index][param]]
else:
paramDict[param].append(dict["blocks"][block][index][param])
if dict["blocks"][block][index][param] == -1 and param in possibleMissingParams :
if dict["warnings"] == []:
templateDict["warnings_name"] = "Warnings:"
templateDict["warnings"] = ""
paramStr = ""
for possibleMissingParam in possibleMissingParams:
paramStr += ", %s" %(possibleMissingParam)
paramStr = paramStr[2:]
templateDict["warnings"] += "%s<br>" %("Values of -1 for %s means that these values were not calculated by ycsb, mostly due to query times longer than the given bucketsize." %(paramStr))
if not firstRun:
tmpparamDict.pop(param)
# Fix missing parameters for this row
if not firstRun:
for key in tmpparamDict:
paramDict[key].append("-")
firstRun = False
# counting amount of columns needed
tableColumnsCounter = 1 # 1 because left column is already there
indexSaver = 0 # Saves next index in case of row break
indexMax = 0
tableHtml=""
for block in dict["blocks"]:
for index in dict["blocks"][block]:
indexMax += 1
while indexSaver < indexMax:
if indexSaver+tableColumnsCounter > indexMax:
break
if tableColumnsCounter >= maxTableColumnsSingle:
indexSaver+=tableColumnsCounter-1
if indexSaver >= indexMax:
break
tableColumnsCounter = 1
tableHtml+="<tr>"
for k in range(0,maxTableColumnsSingle+1):
tableHtml+="<td></td>"
tableHtml+="</tr></tbody>"
continue
tableHtml+="<thead><tr><th>Parametername:</th>"
indexCounter=0 # to find the right index again
for block in dict["blocks"]:
for index in range(0,len(dict["blocks"][block])):
if indexCounter >= indexSaver:
if tableColumnsCounter >= maxTableColumnsSingle:
break
else:
tableHtml+="<th>%s%s:</th>" % (block,index)
tableColumnsCounter += 1
indexCounter+=1
tableHtml+="</tr></thead>"
tableHtml+="<tbody>"
for key in paramDict.keys():
tableHtml+="<tr><th scope=\"row\">%s</th>" % (key)
tableColumnsCounter2 = 1
indexCounter2=0 # to find the right index again
for number in paramDict[key]:
if indexCounter2 >= indexSaver:
if tableColumnsCounter2 >= maxTableColumnsSingle:
break
else:
tableHtml+="<td>%s</td>" % (number)
tableColumnsCounter2+=1
indexCounter2+=1
tableHtml+="</tr>"
tableHtml+="</tbody>"
templateDict["block_results_table"] = tableHtml
return templateDict
# Generate resulting html table for multi measurement page
def generate_results_table_multi(dicts, fileName, logger):
dataDictBlocks = {}
# structure of dataDictBlocks should look like this:
# <blocktype>
# -> "data"
# -> <dbName>
# -> [1,2,3]
# ...
# ...
# -> "cat"
# -> <paramName>
# -> ...
# <blockType> e.g. "INSERT0"
# <dbName> e.g. "MySQL"
# <paramName> e.g. "AvgLatency"
# we do the same here as for multi dict block plots
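# Hypothetical example of the nested layout described above, e.g. one "INSERT0"
# block over two databases and three parameters:
# dataDictBlocks = {
#     "INSERT0": {
#         "cat": ["Operations", "AvgLatency", "99Perc."],
#         "data": {
#             "MySQL":     [1000, 123.4, 310.0],
#             "OtherDB":   [1000, 101.2, 280.5],
#         },
#     },
# }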
generate_block_data(dataDictBlocks, dicts, False, True, logger)
templateDict={}
templateDict["results_table_name"] = "General results:"
templateDict["block_results_table_name"] = "Block results:"
templateDict["title"] = fileName
if len(dicts.keys()) > 0:
if dicts[dicts.keys()[0]]["description"] != "":
templateDict["description_name"] = "Description:"
templateDict["description"] = dicts[dicts.keys()[0]]["description"]
templateDict["dbdesc_name"] = "Database Descriptions:"
dbDescStr = "<table class=\"bk-bs-table\"><tbody>"
counter=0
for dbKey in sorted(dicts.keys()):
if counter%2 == 0:
dbDescStr += "<tr>"
dbDescStr += "<th scope=\"row\">%s:</th><td>%s</td>" %(dicts[dbKey]["dbName"],dicts[dbKey]["dbDesc"])
if counter%2 != 0:
dbDescStr += "</tr>"
counter+=1
templateDict["dbdesc"] = dbDescStr + "</tbody></table>"
errorsOccurred = False
errors = []
warningsOccurred = False
warnings = []
exceptionsOccurred = False
exceptions = []
for dictKey in sorted(dicts.keys()):
if dicts[dictKey]["errors"] != []:
errorsOccurred = True
errors.append(dictKey)
if dicts[dictKey]["warnings"] != []:
warningsOccurred = True
warnings.append(dictKey)
if dicts[dictKey]["exceptions"] != []:
exceptionsOccurred = True
exceptions.append(dictKey)
if errorsOccurred:
templateDict["errors"] = "Errors occured in some of the measurements (%s)." %(errors)
if warningsOccurred:
templateDict["warnings"] = "Warnings occured in some of the measurements (%s)." %(warnings)
# Search for -1 in 95th and 99th Percentiles
minus1Occured = False
for blockType in dataDictBlocks.keys():
if "99Perc." in dataDictBlocks[blockType]["cat"] or \
"95Perc." in dataDictBlocks[blockType]["cat"]:
for dbKey in dataDictBlocks[blockType]["data"].keys():
if dataDictBlocks[blockType]["data"][dbKey][dataDictBlocks[blockType]["cat"].index("99Perc.")] == -1 or \
dataDictBlocks[blockType]["data"][dbKey][dataDictBlocks[blockType]["cat"].index("95Perc.")] == -1:
minus1Occured = True
if minus1Occured:
if "warnings" not in templateDict.keys() or templateDict["warnings"] == None or templateDict["warnings"] == "":
templateDict["warnings"] = "Warning: %s" %("Values of -1 for 99Perc. or 95Perc. means that these values were not calculated by ycsb, mostly due to query times longer than the given bucketsize.")
else:
templateDict["warnings"] += "<br>Warning:%s" %("Values of -1 for 99Perc. or 95Perc. means that these values were not calculated by ycsb, mostly due to query times longer than the given bucketsize.")
if exceptionsOccurred:
templateDict["exceptions"] = "Exceptions occured in some of the measurements (%s)." %(exceptions)
# generate for every block one table:
tableHtml="<h2>Block results:</h2>"
for blockType in dataDictBlocks.keys():
tableColumnsCounter = 1 # 1 because left column is already there
indexSaver = 0 # Saves next index in case of row break
tableHtml+="<h3>%s</h3><table class=\"bk-bs-table\"><thead>" %(blockType)
while indexSaver < len(sorted(dataDictBlocks[blockType]["data"].keys())):
if tableColumnsCounter > len(dataDictBlocks[blockType]["data"].keys()):
break
if tableColumnsCounter >= maxTableColumnsMulti:
indexSaver+=tableColumnsCounter-1
if indexSaver >= len(dataDictBlocks[blockType]["data"].keys()) :
break
tableColumnsCounter = 1
tableHtml+="<tr>"
for k in range(0,maxTableColumnsMulti+1):
tableHtml+="<td></td>"
tableHtml+="</tr></tbody>"
continue
# print blockType
tableHtml+="<tr><th>Parametername:</th>"
indexCounter=0 # to find the right index again
# go through all dbs
for dbKey in sorted(dataDictBlocks[blockType]["data"].keys()):
if indexCounter >= indexSaver:
if tableColumnsCounter >= maxTableColumnsMulti:
break
else:
tableHtml+="<th>%s:</th>" % (dbKey)
tableColumnsCounter += 1
indexCounter+=1
tableHtml+="</tr></thead>"
tableHtml+="<tbody>"
# go through all parameters
for param in dataDictBlocks[blockType]["cat"]:
tableHtml+="<tr><th scope=\"row\">%s</th>" % (param)
tableColumnsCounter2 = 1
indexCounter2=0 # to find the right index again
for dbKey in sorted(dataDictBlocks[blockType]["data"].keys()):
if indexCounter2 >= indexSaver:
if tableColumnsCounter2 >= maxTableColumnsMulti:
break
else:
tableHtml+="<td>%s</td>" % (dataDictBlocks[blockType]["data"][dbKey][dataDictBlocks[blockType]["cat"].index(param)])
tableColumnsCounter2+=1
indexCounter2+=1
tableHtml+="</tr>"
tableHtml+="</tbody></table>"
templateDict["block_results_table"] = tableHtml
# generate general result table
tableHtml="<h2>General results:</h2><table class=\"bk-bs-table\"><thead><tr><th>Parametername:</th>"
parameterArr = []
ignoreParams = ["blocks"] # ignore complete
subParams = ["timeseries","granularity", "bucket"] # substitute with "-"
valueDict = {} # every table row should be inside
firstRound = True # again to check that every dict has every parameter
dbNameKey = "dbNameKey" # some name that identifies our dbName row
for dictKey in dicts:
if dbNameKey not in valueDict.keys():
valueDict[dbNameKey] = [dictKey]
elif dbNameKey in valueDict.keys() and firstRound:
logger.error("A parameter is named 'dbNameKey', please change it. Abort.")
os._exit(-1)
else:
valueDict[dbNameKey].append(dictKey)
copyParameterArr = list(parameterArr) #real copy to check if nothing's missing
for key in dicts[dictKey]:
if key in ignoreParams:
continue
if firstRound:
if key not in parameterArr:
parameterArr.append(key)
else:
try:
copyParameterArr.remove(key)
except Exception, e:
logger.error("Error: '%s' has too many keys." %(key), exc_info=True)
os._exit(-1)
if key not in valueDict.keys():
valueDict[key] = [dicts[dictKey][key]]
else:
valueDict[key].append(dicts[dictKey][key])
if not firstRound:
if len(copyParameterArr) > 0:
for arg in list(copyParameterArr): #copy otherwise removing kills it
if arg in subParams:
valueDict[arg].append("-")
copyParameterArr.remove(arg)
if len(copyParameterArr) == 0:
continue
logger.error("Error: '%s' has too less keys. Left: '%s'" %(dictKey, copyParameterArr), exc_info=True)
os._exit(-1)
firstRound = False
tableColumnsCounter = 1 # 1 because left column is already there
indexSaver = 0 # Saves next index in case of row break
#print valueDict
for rowKey in valueDict:
if tableColumnsCounter > len(valueDict[dbNameKey]):
break
if tableColumnsCounter >= maxTableColumnsMulti:
indexSaver+=tableColumnsCounter-1
if indexSaver >= len(valueDict[dbNameKey]) :
break
tableColumnsCounter = 1
tableHtml+="<tr>"
for k in range(0,maxTableColumnsMulti+1):
tableHtml+="<td></td>"
tableHtml+="</tr></tbody>"
tableHtml+="<tr><th>Parametername:</th>"
continue
indexCounter=0 # to find the right index again
# go through all dbs
for dbKey in sorted(valueDict[dbNameKey]):
if indexCounter >= indexSaver:
if tableColumnsCounter >= maxTableColumnsMulti:
break
else:
tableHtml+="<th>%s:</th>" % (dbKey)
tableColumnsCounter += 1
indexCounter+=1
tableHtml+="</tr></thead>"
tableHtml+="<tbody>"
# go through all parameters
for param in sorted(valueDict.keys()):
if param == dbNameKey:
continue
tableHtml+="<tr><th scope=\"row\">%s</th>" % (param)
tableColumnsCounter2 = 1
indexCounter2=0 # to find the right index again
for value in valueDict[param]:
if indexCounter2 >= indexSaver:
if tableColumnsCounter2 >= maxTableColumnsMulti:
break
else:
tableHtml+="<td>%s</td>" % (value)
tableColumnsCounter2+=1
indexCounter2+=1
tableHtml+="</tr>"
tableHtml+="</tbody></table>"
templateDict["general_results_table"] = tableHtml
return templateDict
def generate_pdf(htmlFile, overwrite, pdfOptions, logger):
if not Util.check_file_exists(htmlFile):
logger.error("Html file does not exist: '%s'" %(htmlFile))
os._exit(-1)
pdfOptions["title"]= "%s" %(os.path.splitext(os.path.basename(htmlFile))[0])
pdfFile="%s.pdf" % (pdfOptions["title"])
if Util.check_file_exists(pdfFile) and not overwrite:
logger.error("Pdf file does already exist: '%s'. Use overwrite flag or delete it." %(pdfFile))
os._exit(-1)
try:
pdfkit.from_file(htmlFile,pdfFile,options=pdfOptions)
except Exception, e:
logger.error("Failed to produce pdf file '%s.pdf'" %(pdfFile), exc_info=True)
os._exit(-1)
def openCompressedFile(ycsbfile, dict, key, decompress, overwrite, logger):
try:
file = gzip.open(ycsbfile,"r")
dict[key]=cPickle.load(file)
file.close()
except Exception, e:
logger.error("Can't open '%s'. Is it really a compressed .ydc file?" %(ycsbfile), exc_info=True)
os._exit(-1)
# if you truly just want to decompress it, stop after saving plain ycsb file
if decompress:
try:
newFileName=os.path.splitext(os.path.basename(ycsbfile))[0]+".log"
if (not Util.check_file_exists(newFileName) or overwrite) and os.access(".", os.W_OK):
if key in dict.keys() and dict[key] != None:
decompressFile(dict[key], newFileName, logger)
else:
logger.error("Dictionary does not have filecontent or is null." , exc_info=True)
os._exit(-1)
else:
logger.error("Can't create '%s' to write. Does it already exist?" %(newFileName), exc_info=True)
os._exit(-1)
except Exception, e:
logger.error("Can't open '%s'." %("%s.log.log" %(os.path.basename(ycsbfile))), exc_info=True)
os._exit(-1)
def decompressFile(fileDict, newFileName,logger):
neededKeys = ["timeseries","granularity","description","bucket","startTime","startLoadTime","runtimeLoad",
"throughputLoad","description","endLoadTime","startRunTime","runtimeRun","throughputRun",
"description","endRunTime", "endTime", "spaceBegin", "blocks", "errors", "warnings", "exceptions"]
neededBlockKeys = ["Operations","LatencyList","MaxLatency(us)","MinLatency(us)","99thPercentileLatency(us)",
"95thPercentileLatency(us)", "AverageLatency(us)"]
# return not in this list, as CLEANUP blocks have no return
for key in neededKeys:
if key not in fileDict.keys():
logger.error("'%s' is missing in ydc file, abort." %(key))
return False
file = open("%s" % (newFileName), "w")
if "timeseries" in fileDict.keys():
if fileDict["timeseries"]:
file.write("TIMESERIES: 1\n")
else:
file.write("TIMESERIES: 0\n")
if "granularity" in fileDict.keys():
file.write("GRANULARITY: %s\n" %(fileDict["granularity"]))
if "dbDesc" in fileDict.keys():
des=fileDict["dbDesc"]
if des[-1] == "\n":
des = des[:-1]
file.write("DESCRIPTION: %s\n" %(des))
if "bucket" in fileDict.keys():
file.write("BUCKET: %s\n" %(fileDict["bucket"]))
if "startTime" in fileDict.keys():
file.write("START: %s: Start Test\n" %(fileDict["startTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "startLoadTime" in fileDict.keys():
file.write("START: %s: Start Load\n" %(fileDict["startLoadTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "runtimeLoad" in fileDict.keys():
file.write("[OVERALL], RunTime(ms), %s\n" %(fileDict["runtimeLoad"]))
if "throughputLoad" in fileDict.keys():
file.write("[OVERALL], Throughput(ops/sec), %s\n" %(fileDict["throughputLoad"]))
## load blocks
for key in ["CLEANUP", "INSERT" ]:
if key in fileDict["blocks"] and len(fileDict["blocks"][key]) > 0:
for key2 in neededBlockKeys:
if key2 not in fileDict["blocks"][key][0].keys():
logger.error("'%s' is missing in ydc file block '%s'0, abort." %(key2, key))
return False
if "Operations" in fileDict["blocks"][key][0].keys():
file.write("[%s], Operations, %s\n" %(key,fileDict["blocks"][key][0]["Operations"]))
if "AverageLatency(us)" in fileDict["blocks"][key][0].keys():
file.write("[%s], AverageLatency(us), %s\n" %(key,fileDict["blocks"][key][0]["AverageLatency(us)"]))
if "MinLatency(us)" in fileDict["blocks"][key][0].keys():
file.write("[%s], MinLatency(us), %s\n" %(key,fileDict["blocks"][key][0]["MinLatency(us)"]))
if "MaxLatency(us)" in fileDict["blocks"][key][0].keys():
file.write("[%s], MaxLatency(us), %s\n" %(key,fileDict["blocks"][key][0]["MaxLatency(us)"]))
if "95thPercentileLatency(us)" in fileDict["blocks"][key][0].keys():
file.write("[%s], 95thPercentileLatency(us), %s\n" %(key,fileDict["blocks"][key][0]["95thPercentileLatency(us)"]))
if "99thPercentileLatency(us)" in fileDict["blocks"][key][0].keys():
file.write("[%s], 99thPercentileLatency(us), %s\n" %(key,fileDict["blocks"][key][0]["99thPercentileLatency(us)"]))
if "Return" in fileDict["blocks"][key][0].keys():
for returnVal in fileDict["blocks"][key][0]["Return"]:
file.write("[%s], Return=%s, %s\n" %(key,returnVal[0],returnVal[1]))
if "LatencyList" in fileDict["blocks"][key][0].keys():
for counter in range(0,len(fileDict["blocks"][key][0]["LatencyList"])-1):
file.write("[%s], %s, %s\n" %(key, counter, fileDict["blocks"][key][0]["LatencyList"][counter]))
file.write("[%s], >%s, %s\n" %(key, len(fileDict["blocks"][key][0]["LatencyList"])-1, fileDict["blocks"][key][0]["LatencyList"][-1]))
# block latency data
if "description" in fileDict.keys():
des=fileDict["description"]
if des[-1] == "\n":
des = des[:-1]
file.write("[DESCRIPTION], %s\n" %(des))
if "endLoadTime" in fileDict.keys():
file.write("END: %s: End Load\n" %(fileDict["endLoadTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "startRunTime" in fileDict.keys():
file.write("START: %s: Start Run\n" %(fileDict["startRunTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "runtimeRun" in fileDict.keys():
file.write("[OVERALL], RunTime(ms), %s\n" %(fileDict["runtimeRun"]))
if "throughputRun" in fileDict.keys():
file.write("[OVERALL], Throughput(ops/sec), %s\n" %(fileDict["throughputRun"]))
## run blocks
for key in ["CLEANUP", "INSERT", "READ", "UPDATE", "SCAN", "AVG", "SUM", "COUNT"]:
if key in fileDict["blocks"] and len(fileDict["blocks"][key]) > 0:
for index in range(0,len(fileDict["blocks"][key])):
if index == 0 and key in ["CLEANUP", "INSERT" ]:
# First Cleanup/Insert block is from load phase -> ignore it
continue
for key2 in neededBlockKeys:
if key2 not in fileDict["blocks"][key][index].keys():
logger.error("'%s' is missing in ydc file block '%s'0, abort." %(key2, key))
return False
if "Operations" in fileDict["blocks"][key][index].keys():
file.write("[%s], Operations, %s\n" %(key,fileDict["blocks"][key][index]["Operations"]))
if "AverageLatency(us)" in fileDict["blocks"][key][index].keys():
file.write("[%s], AverageLatency(us), %s\n" %(key,fileDict["blocks"][key][index]["AverageLatency(us)"]))
if "MinLatency(us)" in fileDict["blocks"][key][index].keys():
file.write("[%s], MinLatency(us), %s\n" %(key,fileDict["blocks"][key][index]["MinLatency(us)"]))
if "MaxLatency(us)" in fileDict["blocks"][key][index].keys():
file.write("[%s], MaxLatency(us), %s\n" %(key,fileDict["blocks"][key][index]["MaxLatency(us)"]))
if "95thPercentileLatency(us)" in fileDict["blocks"][key][index].keys():
file.write("[%s], 95thPercentileLatency(us), %s\n" %(key,fileDict["blocks"][key][index]["95thPercentileLatency(us)"]))
if "99thPercentileLatency(us)" in fileDict["blocks"][key][index].keys():
file.write("[%s], 99thPercentileLatency(us), %s\n" %(key,fileDict["blocks"][key][index]["99thPercentileLatency(us)"]))
if "Return" in fileDict["blocks"][key][index].keys():
for returnVal in fileDict["blocks"][key][index]["Return"]:
file.write("[%s], Return=%s, %s\n" %(key,returnVal[index],returnVal[1]))
if "LatencyList" in fileDict["blocks"][key][index].keys():
for counter in range(0,len(fileDict["blocks"][key][index]["LatencyList"])-1):
file.write("[%s], %s, %s\n" %(key, counter, fileDict["blocks"][key][index]["LatencyList"][counter]))
file.write("[%s], >%s, %s\n" %(key, len(fileDict["blocks"][key][index]["LatencyList"])-1, fileDict["blocks"][key][index]["LatencyList"][-1]))
if "description" in fileDict.keys():
des=fileDict["description"]
if des[-1] == "\n":
des = des[:-1]
file.write("[DESCRIPTION], %s\n" %(des))
if "endRunTime" in fileDict.keys():
file.write("END: %s: End Run\n" %(fileDict["endRunTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "endTime" in fileDict.keys():
file.write("END: %s: End Test\n" %(fileDict["endTime"].strftime('%a %b %d %H:%M:%S %Z %Y')))
if "errors" in fileDict.keys():
for line in fileDict["errors"]:
line2 = line
if line2[-1] == "\n":
line2=line2[:-1]
file.write("%s\n" %(line2))
if "exceptions" in fileDict.keys():
for line in fileDict["exceptions"]:
line2 = line
if line2[-1] == "\n":
line2=line2[:-1]
file.write("%s\n" %(line2))
if "warnings" in fileDict.keys():
for line in fileDict["warnings"]:
line2 = line
if line2[-1] == "\n":
line2=line2[:-1]
file.write("%s\n" %(line2))
if "spaceBegin" in fileDict.keys() and "spaceBetween" in fileDict.keys() and "spaceEnd" in fileDict.keys():
file.write("SPACE: %s %s %s\n" %(fileDict["spaceBegin"],fileDict["spaceBetween"],fileDict["spaceEnd"]))
file.flush()
file.close()
# Generates (html, pdf) Output for every single measurement file
def generate_single_output(dict, templateFile, name, timeseries, overwrite, pdfOptions, logger):
if args.debug:
Util.log_timestamp("Start Generating Single Plot",logger)
p = generate_plot_single(dict, timeseries, logger)
if p == None:
logger.error("Can't generate plots for %s." %(name))
return
if args.debug:
Util.log_timestamp("Start Generating Single Results Table",logger)
templateDict = generate_results_table_single(dict, logger)
if args.debug:
Util.log_timestamp("Start Generating HTML File",logger)
# Generating html
generate_html(p, templateFile, templateDict, "%s.html" % (name), overwrite, logger)
# Generating pdf (if wanted)
if args.pdf:
if args.debug:
Util.log_timestamp("Start Generating PDF File",logger)
generate_pdf("%s.html" % (name), overwrite, pdfOptions, logger)
# Generates (html, pdf) Output for multi (combined) measurement files
def generate_multi_output(dicts, templateFile, timeseries, overwrite, logger, granularity=1000):
granularity=granularity
ts=None
for dictKey in dicts.keys():
if "granularity" in dicts[dictKey].keys():
granularity = dicts[dictKey]["granularity"]
if ts == None and "timeseries" in dicts[dictKey].keys():
ts = dicts[dictKey]["timeseries"]
elif ts != None and "timeseries" in dicts[dictKey].keys() and ts != dicts[dictKey]["timeseries"]:
logger.error("Found one timeseries and one non timeseries type. Abort.")
exit(-1)
if not ts and timeseries:
logger.error("Found at least one non timeseries type and timeseries flag is set. Abort.")
exit(-1)
if ts == None and timeseries:
ts = True
elif ts == None:
ts = False
fileName = "ycsb_combined_%s" % (datetime.datetime.now().strftime("%Y%m%d%H%M"))
p = generate_plot_multi(dicts, ts, logger, )
templateDict = generate_results_table_multi(dicts, fileName, logger)
# Generating html
generate_html(p, templateFile, templateDict, "%s.html" % (fileName), overwrite, logger)
# Configure ArgumentParser
parser = argparse.ArgumentParser(prog="ProcessYcsbLog.py",version=__version__,description="Generates a nice pdf out of YCSB's output.", formatter_class=argparse.RawDescriptionHelpFormatter, epilog="")
parser.add_argument("-l", "--log", action='store_true', help="Be more verbose")
parser.add_argument("-t", "--timeseries", action='store_true', help="Use this flag if you generated timeseries instead of a histogram in ycsb")
parser.add_argument("-f", "--ycsbfiles", metavar="YCSBFILE", nargs='+', required=True, help="Path to YCSB file(s) (can be ycsb outputfile, compressed .ydc file)")
parser.add_argument("-d", "--decompress", action='store_true', help="Decompress ycsb file out of given ydc file")
parser.add_argument("-c", "--compress", action='store_true', help="Stop after making compressed ydc file (do not generate plots/html/pdf)")
parser.add_argument("-o", "--overwrite", action='store_true', help="Overwrite existing files")
parser.add_argument("-p", "--pdf", action='store_true', help="Generate PDF file. (otherwise only pdf is generated)")
parser.add_argument("-s", "--single", action='store_true', help="if given multiple files, also generate single html/pdf for each")
parser.add_argument("--debug", action='store_true', help="Be much more verbose, print timestamps. (also activates -l)")
args = parser.parse_args()
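# Typical invocations (illustrative only, file names are placeholders):
# python ProcessYcsbLog.py -f ycsb_mysql_workloada_201601011200.log      # parse log, write .ydc + html
# python ProcessYcsbLog.py -f run1.ydc run2.ydc -p                       # combined html + pdf for several runs
# python ProcessYcsbLog.py -f run1.ydc -d                                # decompress back to a plain .log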
if args.debug:
args.log=True
# Configure Logging
logLevel = logging.WARN
if args.log:
logLevel = logging.DEBUG
if args.debug:
FORMAT = '%(asctime)-15s: %(message)s'
logging.basicConfig(level=logLevel,format=FORMAT)
else:
logging.basicConfig(level=logLevel)
logger = logging.getLogger(__name__)
if args.debug:
Util.log_timestamp("Start",logger)
# delete non unique files, even if they're given as ycsb output and ydc
for ycsbfile in args.ycsbfiles:
if args.ycsbfiles.count("%s.ydc" %(os.path.splitext(os.path.basename(ycsbfile))[0])) \
+ args.ycsbfiles.count("%s.log" %(os.path.splitext(os.path.basename(ycsbfile))[0])) \
> 1:
args.ycsbfiles.remove(ycsbfile)
logger.warning("'%s' exists more than once (as .log or .ydc), ignoring it." %(ycsbfile))
dicts={}
nameDict={} # stores names for dictKeys
threads=[]
if args.debug:
Util.log_timestamp("Start Loading/Parsing Files",logger)
for ycsbfile in args.ycsbfiles:
# Compressing Output
mime = magic.open(magic.MAGIC_MIME)
mime.load()
try:
basename="%s" %("_".join(os.path.splitext(os.path.basename(ycsbfile))[0].split("_")[1:-2]))
except IndexError:
logger.warning("'%s' is not a normal filename for ycsb-ts logfiles." %(os.path.basename(ycsbfile)))
basename="%s" %(os.path.splitext(os.path.basename(ycsbfile))[0])
except:
logger.error("Can't process filename '%s' as basename." %(os.path.basename(ycsbfile)))
os._exit(-1)
if basename in dicts.keys():
basename="%s_%s" %(basename,dicts.keys().count(basename)+1)
if mime.file(ycsbfile) == "text/plain; charset=us-ascii" or mime.file(ycsbfile) == "text/plain; charset=utf-8":
newFileName="%s.ydc" %(os.path.splitext(os.path.basename(ycsbfile))[0])
# if it is a plain ycsb file, compress it
logger.info("Found Mime type '%s' in '%s'." %(ycsbfile,mime.file(ycsbfile)))
if (not Util.check_file_exists(newFileName) or args.overwrite) and os.access(".", os.W_OK):
dicts[basename] = {}
nameDict[basename]=os.path.splitext(os.path.basename(ycsbfile))[0]
threads.append(threading.Thread(target=process_file, args=(ycsbfile, args.timeseries, dicts[basename], newFileName, logger)))
threads[-1].setDaemon(True)
threads[-1].start()
else:
logger.error("Can't create '%s' to write. Does it already exist?" %(newFileName), exc_info=True)
exit(-1)
elif mime.file(ycsbfile) == "application/gzip; charset=binary":
# if a compressed file is found do decompress or graph
logger.info("Found Mime type '%s' in '%s'." %(ycsbfile,mime.file(ycsbfile)))
# if decompress is what you want...
if Util.check_file_exists(ycsbfile) and os.access(ycsbfile, os.R_OK):
dicts[basename] = {}
nameDict[basename]=os.path.splitext(os.path.basename(ycsbfile))[0]
threads.append(threading.Thread(target=openCompressedFile, args=(ycsbfile, dicts, basename, args.decompress, args.overwrite, logger)))
threads[-1].setDaemon(True)
threads[-1].start()
else:
logger.error("Can't open '%s'. Does it exist?" %(ycsbfile), exc_info=True)
exit(-1)
else:
logger.error("%s has an unkown mimetype '%s', sure it is a ycsb log or .ydc file?" %(ycsbfile,mime.file(ycsbfile)))
exit(-1)
# Wait until all threads are done
logger.debug("Waiting until all files are loaded...")
# only join() would make ctrl+c not work in combination with daemon=true
# Main thread is always there, because of that: >1
while threading.activeCount() > 1:
for thread in threads:
thread.join(100)
threads=[]
if args.debug:
Util.log_timestamp("End Loading/Parsing Files",logger)
# if only compression/decompression is requested, do it and exit
if args.compress or args.decompress:
exit(0)
if (len(args.ycsbfiles)==1 and len(dicts.keys())==1) or (len(args.ycsbfiles)>1 and args.single):
if args.debug:
Util.log_timestamp("Start Generating Single HTML File",logger)
for key in dicts.keys():
# at this stage we should have something in dict, whether it was from a plain ycsb file or from a compressed ydc file
# Generating Graph
if maxTableColumnsSingle < 2:
logger.error("maxTableColumnsSingle has to be > 1")
exit(-1)
threads.append(threading.Thread(target=generate_single_output,
args=(dicts[key], templateFile, nameDict[key],
args.timeseries, args.overwrite, pdfOptions.copy(), logger)))
threads[-1].setDaemon(True)
threads[-1].start()
if len(args.ycsbfiles)>1:
if len(dicts.keys())==len(args.ycsbfiles):
logger.info("Found more than one ycsb/ydc file, doing combined stuff...")
if args.debug:
Util.log_timestamp("Start Generating Multiple HTML Files",logger)
if maxTableColumnsMulti < 2:
logger.error("maxTableColumnsMulti has to be > 1")
exit(-1)
# in dicts there should be every ycsbfile now
# First check: are both non timeseries or timeseries
# 3 cases that are allowed:
# - all are set as timeseries
# - all are set as non-timeseries
# - all have no flag set and are just used as histogram or forced to be used as timeseries by flag
# Case 1 and 2:
threads.append(threading.Thread(target=generate_multi_output,
args=(dicts, templateFileMulti, args.timeseries, args.overwrite, logger)))
threads[-1].setDaemon(True)
threads[-1].start()
else:
logger.error(" %s Files and %s Dicts do not match, this should not happen." %(len(args.ycsbfiles),len(dicts.keys())))
exit(-1)
# Wait until all threads are done
logger.debug("Waiting until all .html/.pdf files are generated...")
# only join() would make ctrl+c not work in combination with daemon=true
# Main thread is always there, because of that: >1
while threading.activeCount() > 1:
for thread in threads:
thread.join(100)
if args.debug:
Util.log_timestamp("End Generating HTML File(s)",logger)
exit(0) | 50.703704 | 267 | 0.589935 |
79450780a27f3f1d891b198a1775fc502c90eac5 | 1,162 | py | Python | dancingtogether/runner.py | rgardner/dancingtogether | 48b52cadb2059df56a6b7c95bae947cd849b758a | [
"MIT"
] | null | null | null | dancingtogether/runner.py | rgardner/dancingtogether | 48b52cadb2059df56a6b7c95bae947cd849b758a | [
"MIT"
] | 19 | 2019-03-29T04:01:43.000Z | 2020-07-10T18:55:23.000Z | dancingtogether/runner.py | rgardner/dancingtogether | 48b52cadb2059df56a6b7c95bae947cd849b758a | [
"MIT"
] | null | null | null | from django.test.runner import DiscoverRunner
import pytest
class PytestTestRunner(DiscoverRunner):
"""Runs pytest to discover and run tests."""
def __init__(self, *args, junit_xml=None, **kwargs):
self.junit_xml = junit_xml
super().__init__(*args, **kwargs)
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'--junit-xml',
help='Create junit-xml style report file at given path')
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""Run pytest and return the exitcode.
It translates some of Django's test command option to pytest's.
"""
argv = []
if self.verbosity == 0:
argv.append('--quiet')
elif self.verbosity == 2:
argv.append('--verbose')
elif self.verbosity == 3:
argv.append('-vv')
if self.failfast:
argv.append('--exitfirst')
if self.keepdb:
argv.append('--reuse-db')
if self.junit_xml:
argv.append(f'--junit-xml={self.junit_xml}')
argv.extend(test_labels)
return pytest.main(argv)
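# Usage sketch (assumed setup, not part of this module): point Django at this
# runner in settings.py, e.g.
#   TEST_RUNNER = "dancingtogether.runner.PytestTestRunner"
# The extra option registered in add_arguments is then forwarded when running
#   python manage.py test --junit-xml=reports/junit.xml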
| 30.578947 | 71 | 0.589501 |
794507aab2b2dddef1934a7ea375b726668f1eaf | 776 | py | Python | data/17498 data/run_17498.py | outs1der/beans | 2a2b9b8deeb877b54d78301b5fe880298d1b6cdb | [
"MIT"
] | 2 | 2019-11-22T06:40:47.000Z | 2021-05-16T04:18:18.000Z | data/17498 data/run_17498.py | outs1der/beans | 2a2b9b8deeb877b54d78301b5fe880298d1b6cdb | [
"MIT"
] | 6 | 2020-11-20T01:08:03.000Z | 2022-01-05T05:13:22.000Z | data/17498 data/run_17498.py | outs1der/beans | 2a2b9b8deeb877b54d78301b5fe880298d1b6cdb | [
"MIT"
] | 4 | 2019-12-02T23:39:18.000Z | 2021-06-15T06:36:31.000Z |
from beans import Beans
B = Beans(ndim=10, nwalkers=200, nsteps=200, run_id="17498_4bursts",
obsname='/home/tom//Documents/Thermonuclear bursts project/beans install/beans/data/17498_obs_new.txt',
burstname='/home/tom//Documents/Thermonuclear bursts project/beans install/beans/data/17498_bursts_new.txt',
gtiname='/home/tom//Documents/Thermonuclear bursts project/beans install/beans/data/17498_gti.txt',
theta=(0.64, 0.01, 0.15, 2.1, 3.5, 0.60, 0.90, 0.0714, 1.4, 11.2), numburstssim=3, numburstsobs=4,
bc=1, ref_ind=1, gti_checking=0, threads=8, restart=False)
B.do_run()
#B.do_analysis()
#theta=(0.64, 0.01, 0.15, 2.1, 3.5, 0.78, 0.90, 0.083, 1.4, 11.2), numburstssim=7, numburstsobs=7
#/home/tom/Documents/Thermonuclear bursts project/beans install/beans/data/ | 45.647059 | 109 | 0.740979 |
794507fd1e0c3a09263509fe5195a0d30ed77092 | 3,572 | py | Python | finrl/trade/backtest.py | Darivian/FinRL | e2853d9c2a0a126a9abfac421c59a224c0755607 | [
"MIT"
] | null | null | null | finrl/trade/backtest.py | Darivian/FinRL | e2853d9c2a0a126a9abfac421c59a224c0755607 | [
"MIT"
] | null | null | null | finrl/trade/backtest.py | Darivian/FinRL | e2853d9c2a0a126a9abfac421c59a224c0755607 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from pyfolio import timeseries
import pyfolio
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from copy import deepcopy
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.config import config
def get_daily_return(df, value_col_name="account_value"):
df = deepcopy(df)
df["daily_return"] = df[value_col_name].pct_change(1)
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True, drop=True)
df.index = df.index.tz_localize("UTC")
return pd.Series(df["daily_return"], index=df.index)
def convert_daily_return_to_pyfolio_ts(df):
strategy_ret= df.copy()
strategy_ret['date'] = pd.to_datetime(strategy_ret['date'])
strategy_ret.set_index('date', drop = False, inplace = True)
strategy_ret.index = strategy_ret.index.tz_localize('UTC')
del strategy_ret['date']
ts = pd.Series(strategy_ret['daily_return'].values, index=strategy_ret.index)
return ts
def backtest_stats(account_value, value_col_name="account_value"):
dr_test = get_daily_return(account_value, value_col_name=value_col_name)
perf_stats_all = timeseries.perf_stats(
returns=dr_test,
positions=None,
transactions=None,
turnover_denom="AGB",
)
print(perf_stats_all)
return perf_stats_all
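# Minimal usage sketch (assumes `df_account_value` is a DataFrame with "date" and
# "account_value" columns produced elsewhere, e.g. by a trading environment):
# perf_stats = backtest_stats(df_account_value)
# baseline_df = get_baseline("^DJI", start="2019-01-01", end="2020-01-01")
# baseline_stats = backtest_stats(baseline_df, value_col_name="close")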
def backtest_plot(
account_value,
baseline_start=config.START_TRADE_DATE,
baseline_end=config.END_DATE,
baseline_ticker="^DJI",
value_col_name="account_value",
):
df = deepcopy(account_value)
test_returns = get_daily_return(df, value_col_name=value_col_name)
baseline_df = get_baseline(
ticker=baseline_ticker, start=baseline_start, end=baseline_end
)
baseline_returns = get_daily_return(baseline_df, value_col_name="close")
with pyfolio.plotting.plotting_context(font_scale=1.1):
pyfolio.create_full_tear_sheet(
returns=test_returns, benchmark_rets=baseline_returns, set_context=False
)
def get_baseline(ticker, start, end):
dji = YahooDownloader(
start_date=start, end_date=end, ticker_list=[ticker]
).fetch_data()
return dji
def trx_plot(df_trade,df_actions,ticker_list):
df_trx = pd.DataFrame(np.array(df_actions['transactions'].to_list()))
df_trx.columns = ticker_list
df_trx.index = df_actions['date']
df_trx.index.name = ''
for i in range(df_trx.shape[1]):
df_trx_temp = df_trx.iloc[:,i]
df_trx_temp_sign = np.sign(df_trx_temp)
buying_signal = df_trx_temp_sign.apply(lambda x: True if x>0 else False)
selling_signal = df_trx_temp_sign.apply(lambda x: True if x<0 else False)
tic_plot = df_trade[(df_trade['tic']==df_trx_temp.name) & (df_trade['date'].isin(df_trx.index))]['close']
tic_plot.index = df_trx_temp.index
plt.figure(figsize = (10, 8))
plt.plot(tic_plot, color='g', lw=2.)
plt.plot(tic_plot, '^', markersize=10, color='m', label = 'buying signal', markevery = buying_signal)
plt.plot(tic_plot, 'v', markersize=10, color='k', label = 'selling signal', markevery = selling_signal)
plt.title(f"{df_trx_temp.name} Num Transactions: {len(buying_signal[buying_signal==True]) + len(selling_signal[selling_signal==True])}")
plt.legend()
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=25))
plt.xticks(rotation=45, ha='right')
plt.show()
| 37.208333 | 145 | 0.68645 |
794508a3297f6c4482565faeb2c92772cf2b7637 | 64,382 | py | Python | test/sql/test_resultset.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | 2 | 2020-02-19T17:50:50.000Z | 2021-02-10T02:52:41.000Z | test/sql/test_resultset.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | null | null | null | test/sql/test_resultset.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | null | null | null | from contextlib import contextmanager
import csv
import operator
from sqlalchemy import CHAR
from sqlalchemy import column
from sqlalchemy import exc
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import util
from sqlalchemy import VARCHAR
from sqlalchemy.engine import default
from sqlalchemy.engine import result as _result
from sqlalchemy.engine import Row
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
from sqlalchemy.sql.selectable import TextualSelect
from sqlalchemy.sql.sqltypes import NULLTYPE
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing import le_
from sqlalchemy.testing import ne_
from sqlalchemy.testing import not_in_
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.util import collections_abc
class ResultProxyTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("user_id", Integer, ForeignKey("users.user_id")),
Column("address", String(30)),
test_needs_acid=True,
)
Table(
"users2",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_row_iteration(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
r = users.select().execute()
rows = []
for row in r:
rows.append(row)
eq_(len(rows), 3)
def test_row_next(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
r = users.select().execute()
rows = []
while True:
row = next(r, "foo")
if row == "foo":
break
rows.append(row)
eq_(len(rows), 3)
@testing.requires.subqueries
def test_anonymous_rows(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
sel = (
select([users.c.user_id])
.where(users.c.user_name == "jack")
.scalar_subquery()
)
for row in select([sel + 1, sel + 3], bind=users.bind).execute():
eq_(row["anon_1"], 8)
eq_(row["anon_2"], 10)
def test_row_comparison(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="jack")
rp = users.select().execute().first()
eq_(rp, rp)
is_(not (rp != rp), True)
equal = (7, "jack")
eq_(rp, equal)
eq_(equal, rp)
is_((not (rp != equal)), True)
is_(not (equal != equal), True)
def endless():
while True:
yield 1
ne_(rp, endless())
ne_(endless(), rp)
# test that everything compares the same
# as it would against a tuple
for compare in [False, 8, endless(), "xyz", (7, "jack")]:
for op in [
operator.eq,
operator.ne,
operator.gt,
operator.lt,
operator.ge,
operator.le,
]:
try:
control = op(equal, compare)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, rp, compare)
else:
eq_(control, op(rp, compare))
try:
control = op(compare, equal)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, compare, rp)
else:
eq_(control, op(compare, rp))
@testing.provide_metadata
def test_column_label_overlap_fallback(self):
content = Table("content", self.metadata, Column("type", String(30)))
bar = Table("bar", self.metadata, Column("content_type", String(30)))
self.metadata.create_all(testing.db)
testing.db.execute(content.insert().values(type="t1"))
row = testing.db.execute(content.select(use_labels=True)).first()
in_(content.c.type, row)
not_in_(bar.c.content_type, row)
row = testing.db.execute(
select([func.now().label("content_type")])
).first()
not_in_(content.c.type, row)
not_in_(bar.c.content_type, row)
def test_pickled_rows(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
for pickle in False, True:
for use_labels in False, True:
result = (
users.select(use_labels=use_labels)
.order_by(users.c.user_id)
.execute()
.fetchall()
)
if pickle:
result = util.pickle.loads(util.pickle.dumps(result))
eq_(result, [(7, "jack"), (8, "ed"), (9, "fred")])
if use_labels:
eq_(result[0]["users_user_id"], 7)
eq_(
list(result[0].keys()),
["users_user_id", "users_user_name"],
)
else:
eq_(result[0]["user_id"], 7)
eq_(list(result[0].keys()), ["user_id", "user_name"])
eq_(result[0][0], 7)
assert_raises(
exc.NoSuchColumnError, lambda: result[0]["fake key"]
)
def test_column_error_printing(self):
result = testing.db.execute(select([1]))
row = result.first()
class unprintable(object):
def __str__(self):
raise ValueError("nope")
msg = r"Could not locate column in row for column '%s'"
for accessor, repl in [
("x", "x"),
(Column("q", Integer), "q"),
(Column("q", Integer) + 12, r"q \+ :q_1"),
(unprintable(), "unprintable element.*"),
]:
assert_raises_message(
exc.NoSuchColumnError, msg % repl, result._getter, accessor
)
is_(result._getter(accessor, False), None)
assert_raises_message(
exc.NoSuchColumnError, msg % repl, lambda: row[accessor]
)
def test_fetchmany(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="jack")
users.insert().execute(user_id=8, user_name="ed")
users.insert().execute(user_id=9, user_name="fred")
r = users.select().execute()
rows = []
for row in r.fetchmany(size=2):
rows.append(row)
eq_(len(rows), 2)
def test_column_slices(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name="john")
users.insert().execute(user_id=2, user_name="jack")
addresses.insert().execute(
address_id=1, user_id=2, address="[email protected]"
)
r = text("select * from addresses", bind=testing.db).execute().first()
eq_(r[0:1], (1,))
eq_(r[1:], (2, "[email protected]"))
eq_(r[:-1], (1, 2))
def test_column_accessor_basic_compiled(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
r = users.select(users.c.user_id == 2).execute().first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r[users.c.user_id], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
eq_(r[users.c.user_name], "jack")
def test_column_accessor_basic_text(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
r = testing.db.execute(
text("select * from users where user_id=2")
).first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
def test_column_accessor_textual_select(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
# this will create column() objects inside
# the select(), these need to match on name anyway
r = testing.db.execute(
select([column("user_id"), column("user_name")])
.select_from(table("users"))
.where(text("user_id=2"))
).first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
def test_column_accessor_dotted_union(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# test a little sqlite < 3.10.0 weirdness - with the UNION,
# cols come back as "users.user_id" in cursor.description
r = testing.db.execute(
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users"
)
).first()
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_raw(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
r = (
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db,
)
.execution_options(sqlite_raw_colnames=True)
.execute()
.first()
)
if testing.against("sqlite < 3.10.0"):
not_in_("user_id", r)
not_in_("user_name", r)
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
else:
not_in_("users.user_id", r)
not_in_("users.user_name", r)
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_translated(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
r = (
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db,
)
.execute()
.first()
)
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
if testing.against("sqlite < 3.10.0"):
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
else:
not_in_("users.user_id", r)
not_in_("users.user_name", r)
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_labels_w_dots(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# test using literal tablename.colname
r = (
text(
'select users.user_id AS "users.user_id", '
'users.user_name AS "users.user_name" '
"from users",
bind=testing.db,
)
.execution_options(sqlite_raw_colnames=True)
.execute()
.first()
)
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
not_in_("user_name", r)
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
def test_column_accessor_unary(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# unary expressions
r = (
select([users.c.user_name.distinct()])
.order_by(users.c.user_name)
.execute()
.first()
)
eq_(r[users.c.user_name], "john")
eq_(r.user_name, "john")
def test_column_accessor_err(self):
r = testing.db.execute(select([1])).first()
assert_raises_message(
AttributeError,
"Could not locate column in row for column 'foo'",
getattr,
r,
"foo",
)
assert_raises_message(
KeyError,
"Could not locate column in row for column 'foo'",
lambda: r["foo"],
)
def test_graceful_fetch_on_non_rows(self):
"""test that calling fetchone() etc. on a result that doesn't
return rows fails gracefully.
"""
# these proxies don't work with no cursor.description present.
# so they don't apply to this test at the moment.
# result.FullyBufferedResultProxy,
# result.BufferedRowResultProxy,
# result.BufferedColumnResultProxy
users = self.tables.users
conn = testing.db.connect()
for meth in [
lambda r: r.fetchone(),
lambda r: r.fetchall(),
lambda r: r.first(),
lambda r: r.scalar(),
lambda r: r.fetchmany(),
lambda r: r._getter("user"),
lambda r: r._has_key("user"),
]:
trans = conn.begin()
result = conn.execute(users.insert(), user_id=1)
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows. "
"It has been closed automatically.",
meth,
result,
)
trans.rollback()
def test_fetchone_til_end(self):
result = testing.db.execute("select * from users")
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
result.close()
assert_raises_message(
exc.ResourceClosedError,
"This result object is closed.",
result.fetchone,
)
def test_connectionless_autoclose_rows_exhausted(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
result = testing.db.execute("select * from users")
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), (1, "john"))
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
@testing.requires.returning
def test_connectionless_autoclose_crud_rows_exhausted(self):
users = self.tables.users
stmt = (
users.insert()
.values(user_id=1, user_name="john")
.returning(users.c.user_id)
)
result = testing.db.execute(stmt)
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), (1,))
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
def test_connectionless_autoclose_no_rows(self):
result = testing.db.execute("select * from users")
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
@testing.requires.updateable_autoincrement_pks
def test_connectionless_autoclose_no_metadata(self):
result = testing.db.execute("update users set user_id=5")
connection = result.connection
assert connection.closed
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows.",
result.fetchone,
)
def test_row_case_sensitive(self):
row = testing.db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
]
)
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
def test_row_case_sensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": True})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols"),
]
)
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"],
)
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
assert_raises(KeyError, lambda: row["screw_UP_the_cols"])
def test_row_case_insensitive(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
]
)
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
def test_row_case_insensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols"),
]
)
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"],
)
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
eq_(row["screw_UP_the_cols"], 3)
def test_row_as_args(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
r = users.select(users.c.user_id == 1).execute().first()
users.delete().execute()
users.insert().execute(r)
eq_(users.select().execute().fetchall(), [(1, "john")])
def test_result_as_args(self):
users = self.tables.users
users2 = self.tables.users2
users.insert().execute(
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="ed"),
]
)
r = users.select().execute()
users2.insert().execute(list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, "john"), (2, "ed")],
)
users2.delete().execute()
r = users.select().execute()
users2.insert().execute(*list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, "john"), (2, "ed")],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name="john")
result = users.outerjoin(addresses).select().execute()
r = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
result._getter,
"user_id",
)
# pure positional targeting; users.c.user_id
# and addresses.c.user_id are known!
# works as of 1.1 issue #3501
eq_(r[users.c.user_id], 1)
eq_(r[addresses.c.user_id], None)
# try to trick it - fake_table isn't in the result!
# we get the correct error
fake_table = Table("fake", MetaData(), Column("user_id", Integer))
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row for column 'fake.user_id'",
lambda: r[fake_table.c.user_id],
)
r = util.pickle.loads(util.pickle.dumps(r))
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
result = users.outerjoin(addresses).select().execute()
result = _result.BufferedColumnResultProxy(result.context)
r = result.first()
assert isinstance(r, _result.BufferedColumnRow)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_by_col(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
ua = users.alias()
u2 = users.alias()
result = select([users.c.user_id, ua.c.user_id]).execute()
row = result.first()
# as of 1.1 issue #3501, we use pure positional
# targeting for the column objects here
eq_(row[users.c.user_id], 1)
eq_(row[ua.c.user_id], 1)
# this now works as of 1.1 issue #3501;
# previously this was stuck on "ambiguous column name"
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row",
lambda: row[u2.c.user_id],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_case_sensitive(self):
eng = engines.testing_engine(options=dict(case_sensitive=False))
row = eng.execute(
select(
[
literal_column("1").label("SOMECOL"),
literal_column("1").label("SOMECOL"),
]
)
).first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row["somecol"],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_contains(self):
users = self.tables.users
addresses = self.tables.addresses
# ticket 2702. in 0.7 we'd get True, False.
# in 0.8, both columns are present so it's True;
# but when they're fetched you'll get the ambiguous error.
users.insert().execute(user_id=1, user_name="john")
result = (
select([users.c.user_id, addresses.c.user_id])
.select_from(users.outerjoin(addresses))
.execute()
)
row = result.first()
eq_(
set([users.c.user_id in row, addresses.c.user_id in row]),
set([True]),
)
def test_loose_matching_one(self):
users = self.tables.users
addresses = self.tables.addresses
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "john"})
conn.execute(
addresses.insert(),
{"address_id": 1, "user_id": 1, "address": "email"},
)
# use some column labels in the SELECT
result = conn.execute(
TextualSelect(
text(
"select users.user_name AS users_user_name, "
"users.user_id AS user_id, "
"addresses.address_id AS address_id "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id "
"WHERE users.user_id=1 "
),
[
users.c.user_id,
users.c.user_name,
addresses.c.address_id,
],
positional=False,
)
)
row = result.first()
eq_(row[users.c.user_id], 1)
eq_(row[users.c.user_name], "john")
def test_loose_matching_two(self):
users = self.tables.users
addresses = self.tables.addresses
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "john"})
conn.execute(
addresses.insert(),
{"address_id": 1, "user_id": 1, "address": "email"},
)
# use some column labels in the SELECT
result = conn.execute(
TextualSelect(
text(
"select users.user_name AS users_user_name, "
"users.user_id AS user_id, "
"addresses.user_id "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id "
"WHERE users.user_id=1 "
),
[users.c.user_id, users.c.user_name, addresses.c.user_id],
positional=False,
)
)
row = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[users.c.user_id],
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[addresses.c.user_id],
)
eq_(row[users.c.user_name], "john")
def test_ambiguous_column_by_col_plus_label(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
result = select(
[
users.c.user_id,
type_coerce(users.c.user_id, Integer).label("foo"),
]
).execute()
row = result.first()
eq_(row[users.c.user_id], 1)
eq_(row[1], 1)
def test_fetch_partial_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
t = text("select * from users").columns(user_name=String())
eq_(testing.db.execute(t).fetchall(), [(7, "ed")])
def test_fetch_unordered_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
class Goofy1(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "a"
class Goofy2(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "b"
class Goofy3(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "c"
t = text(
"select user_name as a, user_name as b, "
"user_name as c from users"
).columns(a=Goofy1(), b=Goofy2(), c=Goofy3())
eq_(testing.db.execute(t).fetchall(), [("eda", "edb", "edc")])
@testing.requires.subqueries
def test_column_label_targeting(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
for s in (
users.select().alias("foo"),
users.select().alias(users.name),
):
row = s.select(use_labels=True).execute().first()
eq_(row[s.c.user_id], 7)
eq_(row[s.c.user_name], "ed")
def test_keys(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
result = users.select().execute()
eq_(result.keys(), ["user_id", "user_name"])
row = result.first()
eq_(row.keys(), ["user_id", "user_name"])
def test_keys_anon_labels(self):
"""test [ticket:3483]"""
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
result = testing.db.execute(
select(
[
users.c.user_id,
users.c.user_name.label(None),
func.count(literal_column("1")),
]
).group_by(users.c.user_id, users.c.user_name)
)
eq_(result.keys(), ["user_id", "user_name_1", "count_1"])
row = result.first()
eq_(row.keys(), ["user_id", "user_name_1", "count_1"])
def test_items(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select().execute().first()
eq_(
[(x[0].lower(), x[1]) for x in list(r.items())],
[("user_id", 1), ("user_name", "foo")],
)
def test_len(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select().execute().first()
eq_(len(r), 2)
r = testing.db.execute("select user_name, user_id from users").first()
eq_(len(r), 2)
r = testing.db.execute("select user_name from users").first()
eq_(len(r), 1)
def test_sorting_in_python(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="foo"),
dict(user_id=2, user_name="bar"),
dict(user_id=3, user_name="def"),
)
rows = users.select().order_by(users.c.user_name).execute().fetchall()
eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")])
eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")])
def test_column_order_with_simple_query(self):
# should return values in column definition order
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select(users.c.user_id == 1).execute().first()
eq_(r[0], 1)
eq_(r[1], "foo")
eq_([x.lower() for x in list(r.keys())], ["user_id", "user_name"])
eq_(list(r.values()), [1, "foo"])
def test_column_order_with_text_query(self):
# should return values in query order
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = testing.db.execute("select user_name, user_id from users").first()
eq_(r[0], "foo")
eq_(r[1], 1)
eq_([x.lower() for x in list(r.keys())], ["user_name", "user_id"])
eq_(list(r.values()), ["foo", 1])
@testing.crashes("oracle", "FIXME: unknown, varify not fails_on()")
@testing.crashes("firebird", "An identifier must begin with a letter")
@testing.provide_metadata
def test_column_accessor_shadow(self):
shadowed = Table(
"test_shadowed",
self.metadata,
Column("shadow_id", INT, primary_key=True),
Column("shadow_name", VARCHAR(20)),
Column("parent", VARCHAR(20)),
Column("row", VARCHAR(40)),
Column("_parent", VARCHAR(20)),
Column("_row", VARCHAR(20)),
)
self.metadata.create_all()
shadowed.insert().execute(
shadow_id=1,
shadow_name="The Shadow",
parent="The Light",
row="Without light there is no shadow",
_parent="Hidden parent",
_row="Hidden row",
)
r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
eq_(r.shadow_id, 1)
eq_(r["shadow_id"], 1)
eq_(r[shadowed.c.shadow_id], 1)
eq_(r.shadow_name, "The Shadow")
eq_(r["shadow_name"], "The Shadow")
eq_(r[shadowed.c.shadow_name], "The Shadow")
eq_(r.parent, "The Light")
eq_(r["parent"], "The Light")
eq_(r[shadowed.c.parent], "The Light")
eq_(r.row, "Without light there is no shadow")
eq_(r["row"], "Without light there is no shadow")
eq_(r[shadowed.c.row], "Without light there is no shadow")
eq_(r["_parent"], "Hidden parent")
eq_(r["_row"], "Hidden row")
def test_nontuple_row(self):
"""ensure the C version of BaseRow handles
duck-type-dependent rows.
As of 1.4 they are converted internally to tuples in any case.
"""
class MyList(object):
def __init__(self, data):
self.internal_list = data
def __len__(self):
return len(self.internal_list)
def __getitem__(self, i):
return list.__getitem__(self.internal_list, i)
proxy = Row(
object(),
[None],
{"key": (0, None, "key"), 0: (0, None, "key")},
MyList(["value"]),
)
eq_(list(proxy), ["value"])
eq_(proxy[0], "value")
eq_(proxy["key"], "value")
@testing.provide_metadata
def test_no_rowcount_on_selects_inserts(self):
"""assert that rowcount is only called on deletes and updates.
        This is because cursor.rowcount can be expensive on some dialects
        such as Firebird; however, many dialects require it be called
before the cursor is closed.
"""
metadata = self.metadata
engine = engines.testing_engine()
t = Table("t1", metadata, Column("data", String(10)))
metadata.create_all(engine)
with patch.object(
engine.dialect.execution_ctx_cls, "rowcount"
) as mock_rowcount:
mock_rowcount.__get__ = Mock()
engine.execute(
t.insert(), {"data": "d1"}, {"data": "d2"}, {"data": "d3"}
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
eq_(
engine.execute(t.select()).fetchall(),
[("d1",), ("d2",), ("d3",)],
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
engine.execute(t.update(), {"data": "d4"})
eq_(len(mock_rowcount.__get__.mock_calls), 1)
engine.execute(t.delete())
eq_(len(mock_rowcount.__get__.mock_calls), 2)
def test_row_is_sequence(self):
row = Row(
object(), [None], {"key": (None, 0), 0: (None, 0)}, ["value"]
)
is_true(isinstance(row, collections_abc.Sequence))
def test_row_is_hashable(self):
row = Row(
object(),
[None, None, None],
{"key": (None, 0), 0: (None, 0)},
(1, "value", "foo"),
)
eq_(hash(row), hash((1, "value", "foo")))
@testing.provide_metadata
def test_row_getitem_indexes_compiled(self):
values = Table(
"rp",
self.metadata,
Column("key", String(10), primary_key=True),
Column("value", String(10)),
)
values.create()
testing.db.execute(values.insert(), dict(key="One", value="Uno"))
row = testing.db.execute(values.select()).first()
eq_(row["key"], "One")
eq_(row["value"], "Uno")
eq_(row[0], "One")
eq_(row[1], "Uno")
eq_(row[-2], "One")
eq_(row[-1], "Uno")
eq_(row[1:0:-1], ("Uno",))
@testing.only_on("sqlite")
def test_row_getitem_indexes_raw(self):
row = testing.db.execute("select 'One' as key, 'Uno' as value").first()
eq_(row["key"], "One")
eq_(row["value"], "Uno")
eq_(row[0], "One")
eq_(row[1], "Uno")
eq_(row[-2], "One")
eq_(row[-1], "Uno")
eq_(row[1:0:-1], ("Uno",))
@testing.requires.cextensions
def test_row_c_sequence_check(self):
metadata = MetaData()
metadata.bind = "sqlite://"
users = Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(40)),
)
users.create()
users.insert().execute(name="Test")
row = users.select().execute().fetchone()
s = util.StringIO()
writer = csv.writer(s)
# csv performs PySequenceCheck call
writer.writerow(row)
assert s.getvalue().strip() == "1,Test"
@testing.requires.selectone
def test_empty_accessors(self):
statements = [
(
"select 1",
[
lambda r: r.last_inserted_params(),
lambda r: r.last_updated_params(),
lambda r: r.prefetch_cols(),
lambda r: r.postfetch_cols(),
lambda r: r.inserted_primary_key,
],
"Statement is not a compiled expression construct.",
),
(
select([1]),
[
lambda r: r.last_inserted_params(),
lambda r: r.inserted_primary_key,
],
r"Statement is not an insert\(\) expression construct.",
),
(
select([1]),
[lambda r: r.last_updated_params()],
r"Statement is not an update\(\) expression construct.",
),
(
select([1]),
[lambda r: r.prefetch_cols(), lambda r: r.postfetch_cols()],
r"Statement is not an insert\(\) "
r"or update\(\) expression construct.",
),
]
for stmt, meths, msg in statements:
r = testing.db.execute(stmt)
try:
for meth in meths:
assert_raises_message(
sa_exc.InvalidRequestError, msg, meth, r
)
finally:
r.close()
class KeyTargetingTest(fixtures.TablesTest):
run_inserts = "once"
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"keyed1",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
)
Table("keyed2", metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
Table("keyed3", metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
Table("keyed4", metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
Table("content", metadata, Column("t", String(30), key="type"))
Table("bar", metadata, Column("ctype", String(30), key="content_type"))
if testing.requires.schemas.enabled:
Table(
"wschema",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
schema=testing.config.test_schema,
)
@classmethod
def insert_data(cls):
cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
cls.tables.content.insert().execute(type="t1")
if testing.requires.schemas.enabled:
cls.tables[
"%s.wschema" % testing.config.test_schema
].insert().execute(dict(b="a1", q="c1"))
@testing.requires.schemas
def test_keyed_accessor_wschema(self):
keyed1 = self.tables["%s.wschema" % testing.config.test_schema]
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single_labeled(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select().apply_labels()).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_c, "c1")
def _test_keyed_targeting_no_label_at_all(self, expression):
lt = literal_column("2")
stmt = select([literal_column("1"), expression, lt]).select_from(
self.tables.keyed1
)
row = testing.db.execute(stmt).first()
eq_(row[expression], "a1")
eq_(row[lt], 2)
# Postgresql for example has the key as "?column?", which dupes
# easily. we get around that because we know that "2" is unique
eq_(row["2"], 2)
def test_keyed_targeting_no_label_at_all_one(self):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# explicit add
kw["add_to_result_map"](None, None, (element,), NULLTYPE)
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select([not_named_max()])), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm)
def test_keyed_targeting_no_label_at_all_two(self):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# we don't add to keymap here; compiler should be doing it
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select([not_named_max()])), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm)
def test_keyed_targeting_no_label_at_all_text(self):
t1 = text("max(a)")
t2 = text("min(a)")
stmt = select([t1, t2]).select_from(self.tables.keyed1)
row = testing.db.execute(stmt).first()
eq_(row[t1], "a1")
eq_(row[t2], "a1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2])).first()
# column access is unambiguous
eq_(row[self.tables.keyed2.c.b], "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "a"
)
# for "b" we have kind of a choice. the name "b" is not ambiguous in
# cursor.description in this case. It is however ambiguous as far as
# the objects we have queried against, because keyed1.c.a has key="b"
# and keyed1.c.b is "b". historically this was allowed as
# non-ambiguous, however the column it targets changes based on
# whether or not the dupe is present so it's ambiguous
# eq_(row.b, "b2")
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "b"
)
# illustrate why row.b above is ambiguous, and not "b2"; because
# if we didn't have keyed2, now it matches row.a. a new column
# shouldn't be able to grab the value from a previous column.
row = testing.db.execute(select([keyed1])).first()
eq_(row.b, "a1")
def test_keyed_accessor_composite_conflict_2_fix_w_uselabels(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(
select([keyed1, keyed2]).apply_labels()
).first()
# column access is unambiguous
eq_(row[self.tables.keyed2.c.b], "b2")
eq_(row["keyed2_b"], "b2")
eq_(row["keyed1_a"], "a1")
def test_keyed_accessor_composite_names_precedent(self):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = testing.db.execute(select([keyed1, keyed4])).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = testing.db.execute(select([keyed1, keyed3])).first()
eq_(row.q, "c1")
# prior to 1.4 #4887, this raised an "ambiguous column name 'a'""
# message, because "b" is linked to "a" which is a dupe. but we know
# where "b" is in the row by position.
eq_(row.b, "a1")
# "a" is of course ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr,
row,
"a",
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(
select([keyed1, keyed2]).apply_labels()
).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row["keyed2_c"])
assert_raises(KeyError, lambda: row["keyed2_q"])
def test_keyed_accessor_column_is_repeated_multiple_times(self):
# test new logic added as a result of the combination of #4892 and
# #4887. We allow duplicate columns, but we also have special logic
# to disambiguate for the same column repeated, and as #4887 adds
# stricter ambiguous result column logic, the compiler has to know to
# not add these dupe columns to the result map, else they register as
# ambiguous.
keyed2 = self.tables.keyed2
keyed3 = self.tables.keyed3
stmt = select(
[
keyed2.c.a,
keyed3.c.a,
keyed2.c.a,
keyed2.c.a,
keyed3.c.a,
keyed3.c.a,
keyed3.c.d,
keyed3.c.d,
]
).apply_labels()
result = testing.db.execute(stmt)
is_false(result._metadata.matched_on_name)
# ensure the result map is the same number of cols so we can
# use positional targeting
eq_(
[rec[0] for rec in result.context.compiled._result_columns],
[
"keyed2_a",
"keyed3_a",
"keyed2_a__1",
"keyed2_a__1",
"keyed3_a__1",
"keyed3_a__1",
"keyed3_d",
"keyed3_d__1",
],
)
row = result.first()
# keyed access will ignore the dupe cols
eq_(row[keyed2.c.a], "a2")
eq_(row[keyed3.c.a], "a3")
eq_(result._getter(keyed3.c.a)(row), "a3")
eq_(row[keyed3.c.d], "d3")
# however we can get everything positionally
eq_(row, ("a2", "a3", "a2", "a2", "a3", "a3", "d3", "d3"))
eq_(row[0], "a2")
eq_(row[1], "a3")
eq_(row[2], "a2")
eq_(row[3], "a2")
eq_(row[4], "a3")
eq_(row[5], "a3")
eq_(row[6], "d3")
eq_(row[7], "d3")
def test_columnclause_schema_column_one(self):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("a"), sql.column("b")
stmt = select([a, b]).select_from(table("keyed2"))
row = testing.db.execute(stmt).first()
in_(a, row)
in_(b, row)
def test_columnclause_schema_column_two(self):
keyed2 = self.tables.keyed2
stmt = select([keyed2.c.a, keyed2.c.b])
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
def test_columnclause_schema_column_three(self):
# this is also addressed by [ticket:2932]
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = testing.db.execute(stmt).first()
in_(stmt.selected_columns.a, row)
in_(stmt.selected_columns.b, row)
def test_columnclause_schema_column_four(self):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("keyed2_a"), sql.column("keyed2_b")
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b
)
row = testing.db.execute(stmt).first()
in_(a, row)
in_(b, row)
in_(stmt.selected_columns.keyed2_a, row)
in_(stmt.selected_columns.keyed2_b, row)
def test_columnclause_schema_column_five(self):
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR
)
row = testing.db.execute(stmt).first()
in_(stmt.selected_columns.keyed2_a, row)
in_(stmt.selected_columns.keyed2_b, row)
class PositionalTextTest(fixtures.TablesTest):
run_inserts = "once"
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"text1",
metadata,
Column("a", CHAR(2)),
Column("b", CHAR(2)),
Column("c", CHAR(2)),
Column("d", CHAR(2)),
)
@classmethod
def insert_data(cls):
cls.tables.text1.insert().execute(
[dict(a="a1", b="b1", c="c1", d="d1")]
)
def test_via_column(self):
c1, c2, c3, c4 = column("q"), column("p"), column("r"), column("d")
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
eq_(row[c4], "d1")
eq_(row[1], "b1")
eq_(row["b"], "b1")
eq_(row.keys(), ["a", "b", "c", "d"])
eq_(row["r"], "c1")
eq_(row["d"], "d1")
def test_fewer_cols_than_sql_positional(self):
c1, c2 = column("q"), column("p")
stmt = text("select a, b, c, d from text1").columns(c1, c2)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row["c"], "c1")
def test_fewer_cols_than_sql_non_positional(self):
c1, c2 = column("a"), column("p")
stmt = text("select a, b, c, d from text1").columns(c2, c1, d=CHAR)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
# c1 name matches, locates
eq_(row[c1], "a1")
eq_(row["c"], "c1")
# c2 name does not match, doesn't locate
assert_raises_message(
exc.NoSuchColumnError, "in row for column 'p'", lambda: row[c2]
)
def test_more_cols_than_sql(self):
c1, c2, c3, c4 = column("q"), column("p"), column("r"), column("d")
stmt = text("select a, b from text1").columns(c1, c2, c3, c4)
with assertions.expect_warnings(
r"Number of columns in textual SQL \(4\) is "
r"smaller than number of columns requested \(2\)"
):
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
assert_raises_message(
exc.NoSuchColumnError, "in row for column 'r'", lambda: row[c3]
)
def test_dupe_col_obj(self):
c1, c2, c3 = column("q"), column("p"), column("r")
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c2)
assert_raises_message(
exc.InvalidRequestError,
"Duplicate column expression requested in "
"textual SQL: <.*.ColumnClause.*; p>",
testing.db.execute,
stmt,
)
def test_anon_aliased_unique(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.c
c3 = text1.alias().c.b
c4 = text1.alias().c.d.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
# text1.c.b goes nowhere....because we hit key fallback
# but the text1.c.b doesn't derive from text1.c.c
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.b'",
lambda: row[text1.c.b],
)
def test_anon_aliased_overlapping(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label(None)
c4 = text1.c.a.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
def test_anon_aliased_name_conflict(self):
text1 = self.tables.text1
c1 = text1.c.a.label("a")
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label("a")
c4 = text1.c.a.label("a")
# all cols are named "a". if we are positional, we don't care.
# this is new logic in 1.1
stmt = text("select a, b as a, c as a, d as a from text1").columns(
c1, c2, c3, c4
)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
# fails, because we hit key fallback and find conflicts
        # in columns that are present
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.a'",
lambda: row[text1.c.a],
)
class AlternateResultProxyTest(fixtures.TablesTest):
__requires__ = ("sqlite",)
@classmethod
def setup_bind(cls):
cls.engine = engine = engines.testing_engine("sqlite://")
return engine
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("x", Integer, primary_key=True),
Column("y", String(50)),
)
@classmethod
def insert_data(cls):
cls.engine.execute(
cls.tables.test.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(1, 12)],
)
@contextmanager
def _proxy_fixture(self, cls):
self.table = self.tables.test
class ExcCtx(default.DefaultExecutionContext):
def get_result_proxy(self):
return cls(self)
self.patcher = patch.object(
self.engine.dialect, "execution_ctx_cls", ExcCtx
)
with self.patcher:
yield
def _test_proxy(self, cls):
with self._proxy_fixture(cls):
rows = []
r = self.engine.execute(select([self.table]))
assert isinstance(r, cls)
for i in range(5):
rows.append(r.fetchone())
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
rows = r.fetchmany(3)
eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
rows = r.fetchall()
eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
r = self.engine.execute(select([self.table]))
rows = r.fetchmany(None)
eq_(rows[0], (1, "t_1"))
# number of rows here could be one, or the whole thing
assert len(rows) == 1 or len(rows) == 11
r = self.engine.execute(select([self.table]).limit(1))
r.fetchone()
eq_(r.fetchone(), None)
r = self.engine.execute(select([self.table]).limit(5))
rows = r.fetchmany(6)
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
# result keeps going just fine with blank results...
eq_(r.fetchmany(2), [])
eq_(r.fetchmany(2), [])
eq_(r.fetchall(), [])
eq_(r.fetchone(), None)
# until we close
r.close()
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.first(), (1, "t_1"))
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.scalar(), 1)
self._assert_result_closed(r)
def _assert_result_closed(self, r):
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchone
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchmany, 2
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchall
)
def test_basic_plain(self):
self._test_proxy(_result.ResultProxy)
def test_basic_buffered_row_result_proxy(self):
self._test_proxy(_result.BufferedRowResultProxy)
def test_basic_fully_buffered_result_proxy(self):
self._test_proxy(_result.FullyBufferedResultProxy)
def test_basic_buffered_column_result_proxy(self):
self._test_proxy(_result.BufferedColumnResultProxy)
def test_resultprocessor_plain(self):
self._test_result_processor(_result.ResultProxy, False)
def test_resultprocessor_plain_cached(self):
self._test_result_processor(_result.ResultProxy, True)
def test_resultprocessor_buffered_column(self):
self._test_result_processor(_result.BufferedColumnResultProxy, False)
def test_resultprocessor_buffered_column_cached(self):
self._test_result_processor(_result.BufferedColumnResultProxy, True)
def test_resultprocessor_buffered_row(self):
self._test_result_processor(_result.BufferedRowResultProxy, False)
def test_resultprocessor_buffered_row_cached(self):
self._test_result_processor(_result.BufferedRowResultProxy, True)
def test_resultprocessor_fully_buffered(self):
self._test_result_processor(_result.FullyBufferedResultProxy, False)
def test_resultprocessor_fully_buffered_cached(self):
self._test_result_processor(_result.FullyBufferedResultProxy, True)
def _test_result_processor(self, cls, use_cache):
class MyType(TypeDecorator):
impl = String()
def process_result_value(self, value, dialect):
return "HI " + value
with self._proxy_fixture(cls):
with self.engine.connect() as conn:
if use_cache:
cache = {}
conn = conn.execution_options(compiled_cache=cache)
stmt = select([literal("THERE", type_=MyType())])
for i in range(2):
r = conn.execute(stmt)
eq_(r.scalar(), "HI THERE")
@testing.fixture
def row_growth_fixture(self):
with self._proxy_fixture(_result.BufferedRowResultProxy):
with self.engine.connect() as conn:
conn.execute(
self.table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 3000)],
)
yield conn
@testing.combinations(
("no option", None, {0: 5, 1: 25, 9: 125, 135: 625, 274: 1000}),
("lt 1000", 27, {0: 5, 16: 27, 70: 27, 150: 27, 250: 27}),
(
"gt 1000",
1500,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 1500, 1351: 1500},
),
(
"gt 1500",
2000,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 2000, 1351: 2000},
),
id_="iaa",
argnames="max_row_buffer,checks",
)
def test_buffered_row_growth(
self, row_growth_fixture, max_row_buffer, checks
):
if max_row_buffer:
result = row_growth_fixture.execution_options(
max_row_buffer=max_row_buffer
).execute(self.table.select())
else:
result = row_growth_fixture.execute(self.table.select())
assertion = {}
max_size = max(checks.values())
for idx, row in enumerate(result, 0):
if idx in checks:
assertion[idx] = result._bufsize
le_(len(result._BufferedRowResultProxy__rowbuffer), max_size)
eq_(checks, assertion)
| 32.36903 | 79 | 0.548476 |
7945090ba89589545bc08e770a82098cba240aa1 | 40,506 | py | Python | tensorflow/python/training/supervisor.py | 285219011/hello-world | dfb71ea206eb9f61e5d97c9727caa1a6449e39cb | [
"Apache-2.0"
] | 6 | 2017-04-25T01:30:41.000Z | 2019-12-11T15:08:46.000Z | tensorflow/python/training/supervisor.py | PaulTR/tensorflow | 84bcff1e814ee5697b5980535583737f8e81d82f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/supervisor.py | PaulTR/tensorflow | 84bcff1e814ee5697b5980535583737f8e81d82f | [
"Apache-2.0"
] | 4 | 2017-04-14T07:31:18.000Z | 2021-08-30T11:06:24.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import threading
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of Tensorflow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a Tensorflow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
  been initialized before returning a session to the training code. The
  non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific host, and also allows the in-process
    master to access remote tensorflow workers. Often, it is
    appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`), as in the sketch below.
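  A minimal sketch, reusing the `sv` and `server` objects from the replica
  example above:
  ```python
  # In-process session, no RPC:
  with sv.managed_session('') as sess:
    while not sv.should_stop():
      sess.run(my_train_op)
  # Session on a specific TensorFlow server in the cluster
  # (equivalent to passing 'grpc://hostname:port'):
  with sv.managed_session(server.target) as sess:
    while not sv.should_stop():
      sess.run(my_train_op)
  ```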
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
    sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the `summary_op` and `saver` optionally passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
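  A minimal sketch, passing a custom `init_fn` callable (a constructor
  argument described below); `restore_from_legacy_checkpoint` is a
  hypothetical helper:
  ```python
  def custom_init(sess):
    # Called once with the newly created session, after the optional init_op.
    restore_from_legacy_checkpoint(sess)  # hypothetical helper
  sv = Supervisor(logdir='/tmp/mydir', init_fn=custom_init)
  ```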
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
# Protects _TENSORFLOW_LAUNCHED
_launch_lock = threading.Lock()
# True if we have already launched the tensorflow in-process server.
_TENSORFLOW_LAUNCHED = False
def __init__(self, graph=None, ready_op=USE_DEFAULT, is_chief=True,
init_op=USE_DEFAULT, init_feed_dict=None,
local_init_op=USE_DEFAULT, logdir=None,
summary_op=USE_DEFAULT, saver=USE_DEFAULT,
global_step=USE_DEFAULT, save_summaries_secs=120,
save_model_secs=600, recovery_wait_secs=30, stop_grace_secs=120,
checkpoint_basename="model.ckpt", session_manager=None,
summary_writer=USE_DEFAULT, init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from merge_all_summaries(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Default to the op named 'global_step' in the graph if it exists, is of
        rank 1, size 1, and of type tf.int32 or tf.int64. If `None`, the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(ready_op=ready_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._started_threads = []
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op, graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self, ready_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.initialize_all_variables()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.all_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initilizes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
      A number of seconds.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
      A number of seconds.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
    - A StepCounter thread measuring step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
self._started_threads.extend(threads)
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
    and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
    you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
self._started_threads.extend(threads)
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
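    Example: call `print_loss(sess)` every 60 seconds, a minimal sketch with
    `print_loss` and `sess` as in the class docstring above. Note that `args`
    must be a sequence, so a single argument is passed as a one-element
    tuple:
      sv.loop(60, print_loss, args=(sess,))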
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
self._started_threads.append(looper)
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
join_threads = []
join_threads.extend(self._started_threads)
if threads is not None:
join_threads.extend(threads)
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(join_threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
self._started_threads = []
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
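    A minimal usage sketch:
      with sv.stop_on_exception():
        while not sv.should_stop():
          sess.run(my_train_op)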
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type == "Variable" and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
        except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
      checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
| 38.069549 | 80 | 0.697033 |
7945099aaa5bbaf3488a4bdf9426b834cbcdb923 | 645 | py | Python | datasets/text_to_bytes.py | hcngdaniel/VAEFaceRecognition | f13c95675998bb59c4efa1c197ffcaeadc4fd1ed | [
"MIT"
] | null | null | null | datasets/text_to_bytes.py | hcngdaniel/VAEFaceRecognition | f13c95675998bb59c4efa1c197ffcaeadc4fd1ed | [
"MIT"
] | null | null | null | datasets/text_to_bytes.py | hcngdaniel/VAEFaceRecognition | f13c95675998bb59c4efa1c197ffcaeadc4fd1ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
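# Converts the plain-text landmark file into a byte-encoded one: each line of
# 'landmarks.txt' holds whitespace-separated floats, which are packed as
# float32 and written out latin1-decoded to 'landmarks_in_bytes.txt'.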
with open('landmarks_in_bytes.txt', 'w', encoding='latin1') as outfile:
with open('landmarks.txt', 'r') as f:
print('counting...')
lines = list(iter(f))
lines_len = len(lines)
print(lines_len)
print('start')
all_num = []
for idx, line in enumerate(lines):
print(f'{(idx + 1)}/{lines_len}')
nums = map(float, line.strip().split(' '))
nums = np.array(list(nums), dtype=np.float32)
all_num.append(nums)
all_num = np.array(all_num).tobytes().decode('latin1')
outfile.write(all_num)
| 33.947368 | 71 | 0.562791 |
794509c3d3f96ca49d617521f4956449d4665ff3 | 1,688 | py | Python | parser_py/__main__.py | MridulS/binder-launches | 8047f9e4a6c1a60d8ef75fbab2d5d135a3158613 | [
"MIT"
] | 1 | 2021-04-08T14:39:47.000Z | 2021-04-08T14:39:47.000Z | parser_py/__main__.py | MridulS/binder-launches | 8047f9e4a6c1a60d8ef75fbab2d5d135a3158613 | [
"MIT"
] | 5 | 2021-04-05T10:37:16.000Z | 2021-04-16T13:28:48.000Z | parser_py/__main__.py | MridulS/binder-launches | 8047f9e4a6c1a60d8ef75fbab2d5d135a3158613 | [
"MIT"
] | 1 | 2021-04-08T14:39:51.000Z | 2021-04-08T14:39:51.000Z | from datetime import datetime
from datetime import timedelta
from time import sleep
from time import time
from .parser import parse
from .settings import load_settings
from .utils import upgrade_db
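# Entry point: run one parse over the [since, until] date range; when the
# "continuous" setting is on, keep looping, advancing the range day by day
# and sleeping until the next daily archive (or the next hour) is due.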
if __name__ == "__main__":
settings = load_settings()
logger = settings["logger"]
if settings["db_upgrade"]:
logger.info("Upgrading database")
upgrade_db()
delete_old = settings["delete_old"]
since = settings["since"]
until = settings["until"]
while True:
logger.debug(f"parse: {since=}, {until=}, {delete_old=}")
parse(since, until, delete_old)
if settings["continuous"]:
delete_old = True
now = datetime.utcnow()
days_diff = (now.date() - until).days
if days_diff > 0:
today_00 = datetime.combine(now.date(), datetime.min.time())
seconds_diff = (now - today_00).seconds
if seconds_diff < 7200:
# it is before 2 am, sleep until 2 am [UTC]
# so we ensure that yesterday's archive is complete
since = until
logger.info("Sleeping until 2 am [UTC]")
sleep(7200 - seconds_diff)
else:
# add 1 day
since = until + timedelta(1)
until = now.date()
else:
# sleep until beginning of next hour
seconds = 3600 - time() % 3600
logger.info(f"Sleeping {seconds} seconds")
sleep(seconds)
since = now.date()
until = now.date()
else:
break
| 33.76 | 76 | 0.53436 |
79450b42e24fd240e8e5109286bd56facefc99f2 | 616 | py | Python | sol/iniciante/1018 - Cedulas.py | thiagojobson/PyURI | 93a16c9a4cf4fc22e9f3f545a452fe26e7e761e3 | [
"Unlicense"
] | null | null | null | sol/iniciante/1018 - Cedulas.py | thiagojobson/PyURI | 93a16c9a4cf4fc22e9f3f545a452fe26e7e761e3 | [
"Unlicense"
] | null | null | null | sol/iniciante/1018 - Cedulas.py | thiagojobson/PyURI | 93a16c9a4cf4fc22e9f3f545a452fe26e7e761e3 | [
"Unlicense"
] | null | null | null | # 1018 - Cédulas
# https://www.urionlinejudge.com.br/judge/pt/problems/view/1018
# count_banknotes(Tuple[int, ...], int) -> List[Tuple[int, int]]
def count_banknotes(banknotes, value):
counter = {}
for bkn in banknotes:
counter[bkn] = value // bkn
value %= bkn
return sorted(counter.items(), key=lambda x: x[0], reverse=True)
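# Illustrative example (not part of the original solution):
#   count_banknotes((100, 50, 20, 10, 5, 2, 1), 576)
#   -> [(100, 5), (50, 1), (20, 1), (10, 0), (5, 1), (2, 0), (1, 1)]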
def main():
value = int(input())
banknotes = (100, 50, 20, 10, 5, 2, 1)
print(value)
for bkn, count in count_banknotes(banknotes, value):
print('{} nota(s) de R$ {},00'.format(count, bkn))
if __name__ == '__main__':
main()
| 24.64 | 68 | 0.61526 |
79450b5b3cff7df68ac26a4f5a90fc8c01b4dc35 | 18,859 | py | Python | httpx_caching/_policy.py | johtso/httpx-caching | df7cb162c8986237102e62475338f4bef659468e | [
"Apache-2.0"
] | 35 | 2020-09-26T16:49:49.000Z | 2022-03-20T13:27:54.000Z | httpx_caching/_policy.py | johtso/httpx-caching | df7cb162c8986237102e62475338f4bef659468e | [
"Apache-2.0"
] | 4 | 2021-03-24T17:32:33.000Z | 2022-03-28T18:32:44.000Z | httpx_caching/_policy.py | johtso/httpx-caching | df7cb162c8986237102e62475338f4bef659468e | [
"Apache-2.0"
] | 4 | 2020-09-26T20:07:24.000Z | 2021-10-30T19:15:38.000Z | import calendar
import logging
import time
import typing
from copy import copy
from dataclasses import dataclass
from email.utils import parsedate_tz
from enum import Enum
from typing import Awaitable, Callable, Generator, Iterable, Optional, Tuple, Union
from httpx import ByteStream, Headers, Request, codes
from ._heuristics import BaseHeuristic
from ._models import Response
from ._utils import async_callback_generator, sync_callback_generator
logger = logging.getLogger(__name__)
PERMANENT_REDIRECT_STATUSES = (
301,
308,
)
INVALIDATING_METHODS = ("PUT", "PATCH", "DELETE")
Source = Enum("Source", ["CACHE", "SERVER"])
Evaluation = Enum("Evaluation", ["GOOD", "INCONCLUSIVE"])
CacheVerb = Enum("CacheVerb", ["GET", "SET", "DELETE"])
VaryData = dict
# Cache actions
@dataclass
class CacheGet:
key: str
@dataclass
class CacheSet:
key: str
response: Response
vary_header_values: dict
deferred: bool = False
@dataclass
class CacheDelete:
key: str
CacheAction = Union[CacheGet, CacheSet, CacheDelete]
# HTTP request related IO actions
@dataclass
class MakeRequest:
request: Request
@dataclass
class CloseResponseStream:
response: Response
IOAction = Union[CacheAction, MakeRequest, CloseResponseStream]
AsyncIOCallback = Callable[[IOAction], Awaitable[Optional[Response]]]
SyncIOCallback = Callable[[IOAction], Optional[Response]]
@dataclass
class CachingPolicy:
request: Request
cache_etags: bool
heuristic: Optional[BaseHeuristic]
cacheable_methods: Iterable[str]
cacheable_status_codes: Iterable[int]
@typing.no_type_check
def run(
self,
io_callback: SyncIOCallback,
) -> Tuple[Response, Source]:
# TODO: Shouldn't need to make mypy ignore this should I?
return sync_callback_generator(
caching_policy,
io_callback,
dict(
request=self.request,
cache_etags=self.cache_etags,
heuristic=self.heuristic,
cacheable_methods=self.cacheable_methods,
cacheable_status_codes=self.cacheable_status_codes,
),
)
@typing.no_type_check
async def arun(
self,
io_callback: AsyncIOCallback,
) -> Tuple[Response, Source]:
return await async_callback_generator(
caching_policy,
io_callback,
dict(
request=self.request,
cache_etags=self.cache_etags,
heuristic=self.heuristic,
cacheable_methods=self.cacheable_methods,
cacheable_status_codes=self.cacheable_status_codes,
),
)
def caching_policy(
request: Request,
cache_etags: bool,
heuristic: BaseHeuristic,
cacheable_methods: Tuple[str],
cacheable_status_codes: Tuple[int],
) -> Generator[IOAction, Response, Tuple[Response, Source]]:
cached_response, evaluation = yield from try_from_cache_policy(
request, cacheable_methods
)
print(f"evaluation: {evaluation}")
if cached_response and evaluation == Evaluation.GOOD:
return cached_response, Source.CACHE
response, source = yield from try_from_server_policy(
request,
cached_response,
heuristic,
cache_etags,
cacheable_status_codes,
cacheable_methods,
)
return response, source
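# Rough sketch of how a transport might drive this callback-based policy
# (illustrative; the argument values shown are assumptions, only the names
# defined in this module are real):
#
#   policy = CachingPolicy(request, cache_etags=True, heuristic=None,
#                          cacheable_methods=("GET",),
#                          cacheable_status_codes=(200, 301, 308))
#   response, source = policy.run(io_callback)
#
# where io_callback is invoked with each yielded action (CacheGet, CacheSet,
# CacheDelete, MakeRequest, CloseResponseStream) and returns whatever that
# action asks for, e.g. a (response, vary_data) pair for CacheGet.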
def try_from_cache_policy(
request: Request,
cacheable_methods: Iterable[str],
) -> Generator[
CacheAction,
Tuple[Response, VaryData],
Union[Tuple[Response, Evaluation], Tuple[None, None]],
]:
"""
yield cache actions
expects responses in return
may finally return valid response as StopIteration value
"""
# Will only yield GET or DELETE CacheActions. Does not write to cache.
cache_key = get_cache_key(request)
if request.method not in cacheable_methods:
return None, None
cc = parse_cache_control_directives(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return None, None
if cc.get("max-age") == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return None, None
logger.debug(f'Looking up "{cache_key}" in the cache')
cached_response: Optional[Response]
cached_vary_data: dict
cached_response, cached_vary_data = yield CacheGet(cache_key)
if cached_response is None:
logger.debug("No cache entry available")
return None, None
if not check_vary_headers(request.headers, cached_vary_data):
logger.debug("Ignoring cache entry due to vary header mismatch")
return None, None
# If we have a cached permanent redirect, return it immediately. We
# don't need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
#
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if cached_response.status_code in PERMANENT_REDIRECT_STATUSES:
msg = (
"Returning cached permanent redirect response "
"(ignoring date and etag information)"
)
logger.debug(msg)
return cached_response, Evaluation.GOOD
if "date" not in cached_response.headers:
if "etag" not in cached_response.headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
yield CacheDelete(cache_key)
return None, None
logger.debug("Ignoring cached response: no date")
# TODO: Should this return None? Is the cached response now no longer relevant to this request?
return cached_response, Evaluation.INCONCLUSIVE
now = time.time()
# TODO: parsedate_tz might return None (no date value or malformed)
date = calendar.timegm(parsedate_tz(cached_response.headers["date"])) # type: ignore
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
resp_cc = parse_cache_control_directives(cached_response.headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if "max-age" in resp_cc:
freshness_lifetime = resp_cc["max-age"]
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in cached_response.headers:
expires = parsedate_tz(cached_response.headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires) - date # type: ignore
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if "max-age" in cc:
freshness_lifetime = cc["max-age"]
logger.debug("Freshness lifetime from request max-age: %i", freshness_lifetime)
if "min-fresh" in cc:
min_fresh = cc["min-fresh"]
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return cached_response, Evaluation.GOOD
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in cached_response.headers:
logger.debug('The cached response is "stale" with no etag, purging')
yield CacheDelete(cache_key)
return None, None
# No conclusive response yet.
return cached_response, Evaluation.INCONCLUSIVE
def try_from_server_policy(
request: Request,
cached_response: Optional[Response],
heuristic: BaseHeuristic,
cache_etags: bool,
cacheable_status_codes: Iterable[int],
cacheable_methods: Iterable[str],
) -> Generator[IOAction, Response, Tuple[Response, Source]]:
cache_key = get_cache_key(request)
print("we have this from the cache:", cached_response)
updated_headers = request.headers.copy()
if cached_response:
# Add conditional headers based on cached response
for source, target in [
("etag", "If-None-Match"),
("last-modified", "If-Modified-Since"),
]:
if source in cached_response.headers:
updated_headers[target] = cached_response.headers[source]
request = Request(
method=request.method,
url=request.url,
headers=updated_headers,
stream=request.stream,
)
server_response = yield MakeRequest(request)
# See if we should invalidate the cache.
if is_invalidating_method(request.method) and not codes.is_error(
server_response.status_code
):
yield CacheDelete(cache_key)
if request.method not in cacheable_methods:
return server_response, Source.SERVER
# Check for any heuristics that might update headers
# before trying to cache.
if heuristic:
# TODO: don't modify things, return things.
heuristic.apply(server_response.headers, server_response.status_code)
# apply any expiration heuristics
if server_response.status_code == 304:
# Make sure to clean up the ETag response stream just in case.
# Compliant servers will not return a body with ETag responses
yield CloseResponseStream(server_response)
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an ETag. In either case, we want to try and
# update the cache if that is the case.
if cached_response:
updated_cached_response = update_with_304_response(
cached_response, new_response_headers=server_response.headers
)
vary_header_values = get_vary_headers(
request.headers, updated_cached_response
)
yield CacheSet(cache_key, updated_cached_response, vary_header_values)
return updated_cached_response, Source.CACHE
return server_response, Source.SERVER
# We have a new response, let's make any changes necessary to the cache (store/delete)
cache_exists = bool(cached_response)
cache_action = cache_response_action(
request,
server_response,
cache_exists,
cache_etags,
cacheable_status_codes,
)
if cache_action:
wrapped_stream_response = yield cache_action
if wrapped_stream_response:
server_response = wrapped_stream_response
return server_response, Source.SERVER
def cache_response_action(
request: Request,
server_response: Response,
cache_exists: bool,
cache_etags: bool,
cacheable_status_codes: Iterable[int],
) -> Optional[Union[CacheSet, CacheDelete]]:
"""
Algorithm for caching responses.
Does some checks on request and response and deletes cache if appropriate
Then either:
No cache
Cache immediately with no body for redirects
Cache with body, this must be deferred.
Returns:
May return a request that has had its stream wrapped to trigger caching once read.
"""
cache_key = get_cache_key(request)
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
if server_response.status_code not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s",
server_response.status_code,
cacheable_status_codes,
)
return None
logger.debug('Updating cache with response from "%s"', cache_key)
# TODO: Do this once on the request/response?
cc_req = parse_cache_control_directives(request.headers)
cc = parse_cache_control_directives(server_response.headers)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and cache_exists:
logger.debug('Purging existing cache entry to honor "no-store"')
return CacheDelete(cache_key)
if no_store:
return None
# https://tools.ietf.org/html/rfc7234#section-4.1:
# A Vary header field-value of "*" always fails to match.
# Storing such a response leads to a deserialization warning
# during cache lookup and is not allowed to ever be served,
# so storing it can be avoided.
if "*" in server_response.headers.get("vary", ""):
logger.debug('Response header has "Vary: *"')
return None
# If we've been given an etag, then keep the response
if cache_etags and "etag" in server_response.headers:
logger.debug("Caching due to etag")
# Add to the cache any permanent redirects. We do this before looking
# that the Date headers.
elif int(server_response.status_code) in PERMANENT_REDIRECT_STATUSES:
logger.debug("Caching permanent redirect")
response_body = b""
response = Response(
server_response.status_code,
server_response.headers,
# TODO: This is naff, maybe we just use httpx.Response
ByteStream(response_body),
)
vary_header_values = get_vary_headers(request.headers, response)
return CacheSet(cache_key, response, vary_header_values)
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif "date" in server_response.headers:
# cache when there is a max-age > 0
if "max-age" in cc and cc["max-age"] > 0:
logger.debug("Caching b/c date exists and max-age > 0")
# If the request can expire, it means we should cache it
# in the meantime.
elif "expires" in server_response.headers:
if server_response.headers["expires"]:
logger.debug("Caching b/c of expires header")
else:
return None
else:
return None
vary_header_values = get_vary_headers(request.headers, server_response)
return CacheSet(cache_key, server_response, vary_header_values, deferred=True)
def get_cache_key(request: Request) -> str:
return str(request.url)
def is_invalidating_method(method: str):
return method in INVALIDATING_METHODS
def parse_cache_control_directives(headers: Headers):
known_directives = {
# https://tools.ietf.org/html/rfc7234#section-5.2
"max-age": (int, True),
"max-stale": (int, False),
"min-fresh": (int, True),
"no-cache": (None, False),
"no-store": (None, False),
"no-transform": (None, False),
"only-if-cached": (None, False),
"must-revalidate": (None, False),
"public": (None, False),
"private": (None, False),
"proxy-revalidate": (None, False),
"s-maxage": (int, True),
}
cc_headers = headers.get("cache-control", "")
retval = {} # type: ignore
for cc_directive in cc_headers.split(","):
if not cc_directive.strip():
continue
parts = cc_directive.split("=", 1)
directive = parts[0].strip()
try:
typ, required = known_directives[directive]
except KeyError:
logger.debug("Ignoring unknown cache-control directive: %s", directive)
continue
if not typ or not required:
retval[directive] = None
if typ:
try:
retval[directive] = typ(parts[1].strip())
except IndexError:
if required:
logger.debug(
"Missing value for cache-control " "directive: %s",
directive,
)
except ValueError:
logger.debug(
"Invalid value for cache-control directive " "%s, must be %s",
directive,
typ.__name__,
)
return retval
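# Illustrative example, using the Headers class imported above:
#   parse_cache_control_directives(Headers({"cache-control": "max-age=3600, no-store"}))
#   returns {"max-age": 3600, "no-store": None}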
def update_with_304_response(
cached_response: Response, new_response_headers: Headers
) -> Response:
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
updated_response = copy(cached_response)
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know that might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
updated_response.headers.update(
dict(
(k, v)
for k, v in new_response_headers.items()
# TODO: Don't think .lower() is necessary
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
updated_response.status_code = 200
return updated_response
def check_vary_headers(request_headers: Headers, cached_vary_data: dict) -> bool:
"""Verify our vary headers match."""
# Ensure that the Vary headers for the cached response match our
# request
# TODO: this should not be here, no reason for request headers to be so deep in deserialization.
for header, value in cached_vary_data.items():
if request_headers.get(header, None) != value:
return False
return True
def get_vary_headers(request_headers: Headers, response: Response):
"""Get vary headers values for persisting in the cache for later checking"""
vary = {}
# Construct our vary headers
if "vary" in response.headers:
varied_headers = response.headers["vary"].split(",")
for header in varied_headers:
header = header.strip()
header_value = request_headers.get(header, None)
vary[header] = header_value
return vary
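# Illustrative round trip: for a response carrying "Vary: Accept-Encoding",
# get_vary_headers() records the request's Accept-Encoding value, and
# check_vary_headers() later only accepts a cache hit when the new request
# sends that same value.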
| 33.028021 | 103 | 0.659261 |
79450b6997be06b3d2f6e74e527b98b16d885883 | 7,700 | py | Python | hbctool/util.py | utpk/hbctool | f55ec0c39f31af862d556c9952a2781bbab0dc63 | [
"MIT"
] | 108 | 2021-01-11T11:32:16.000Z | 2022-03-21T17:25:10.000Z | hbctool/util.py | bbhunter/hbctool | cd229290e98974bb2d52ca9357f17755c7551c75 | [
"MIT"
] | 11 | 2021-01-12T04:41:01.000Z | 2022-03-11T15:48:46.000Z | hbctool/util.py | bbhunter/hbctool | cd229290e98974bb2d52ca9357f17755c7551c75 | [
"MIT"
] | 22 | 2021-01-11T12:06:30.000Z | 2022-03-17T01:37:41.000Z |
from struct import pack, unpack
# File Object
class BitWriter(object):
def __init__(self, f):
self.accumulator = 0
self.bcount = 0
self.out = f
self.write = 0
self.remained = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
def __del__(self):
try:
self.flush()
except ValueError: # I/O operation on closed file.
pass
def _writebit(self, bit, remaining=-1):
if remaining > -1:
self.accumulator |= bit << (remaining - 1)
else:
self.accumulator |= bit << (7 - self.bcount + self.remained)
self.bcount += 1
if self.bcount == 8:
self.flush()
def _clearbits(self, remaining):
self.remained = remaining
def _writebyte(self, b):
assert not self.bcount, "bcount is not zero."
self.out.write(bytes([b]))
self.write += 1
def writebits(self, v, n, remained=False):
i = n
while i > 0:
self._writebit((v & (1 << i-1)) >> (i-1), remaining=(i if remained else -1))
i -= 1
if remained:
self._clearbits(n)
def writebytes(self, v, n):
while n > 0:
self._writebyte(v & 0xff)
v = v >> 8
n -= 1
return v
def flush(self):
self.out.write(bytes([self.accumulator]))
self.accumulator = 0
self.bcount = 0
self.remained = 0
self.write += 1
def seek(self, i):
self.out.seek(i)
self.write = i
def tell(self):
return self.write
def pad(self, alignment):
assert alignment > 0 and alignment <= 8 and ((alignment & (alignment - 1)) == 0), "Support alignment as many as 8 bytes."
l = self.tell()
if l % alignment == 0:
return
b = alignment - (l % alignment)
self.writeall([0] * (b))
def writeall(self, bs):
self.out.write(bytes(bs))
self.write += len(bs)
class BitReader(object):
def __init__(self, f):
self.input = f
self.accumulator = 0
self.bcount = 0
self.read = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def _readbit(self, remaining=-1):
if not self.bcount:
a = self.input.read(1)
self.read += 1
if a:
self.accumulator = ord(a)
self.bcount = 8
if remaining > -1:
assert remaining <= self.bcount, f"WTF ({remaining}, {self.bcount})"
return (self.accumulator & (1 << remaining-1)) >> remaining-1
rv = (self.accumulator & (1 << self.bcount-1)) >> self.bcount-1
self.bcount -= 1
return rv
def _clearbits(self, remaining):
self.bcount -= remaining
self.accumulator = self.accumulator >> remaining
def _readbyte(self):
assert not self.bcount, "bcount is not zero."
a = self.input.read(1)
self.read += 1
return ord(a)
def readbits(self, n, remained=False):
v = 0
i = n
while i > 0:
v = (v << 1) | self._readbit(remaining=(i if remained else -1))
i -= 1
if remained:
self._clearbits(n)
return v
def readbytes(self, n=1):
v = 0
while n > 0:
v = (v << 8) | self._readbyte()
n -= 1
return v
def seek(self, i):
self.input.seek(i)
self.read = i
def tell(self):
return self.read
def pad(self, alignment):
assert alignment > 0 and alignment <= 8 and ((alignment & (alignment - 1)) == 0), "Support alignment as many as 8 bytes."
l = self.tell()
if l % alignment == 0:
return
b = alignment - (l % alignment)
self.seek(l + b)
def readall(self):
a = self.input.read()
self.read += len(a)
return list(a)
# File utilization function
# Read
def readuint(f, bits=64, signed=False):
assert bits % 8 == 0, "Not support"
if bits == 8:
return f.readbytes(1)
x = 0
s = 0
for _ in range(bits//8):
b = f.readbytes(1)
x |= (b & 0xFF) << s
s += 8
if signed and (x & (1<<(bits-1))):
x = - ((1<<(bits)) - x)
if x.bit_length() > bits:
print(f"--> Int {x} longer than {bits} bits")
return x
def readint(f, bits=64):
return readuint(f, bits, signed=True)
def readbits(f, bits=8):
x = 0
s = 0
if f.bcount % 8 != 0 and bits >= f.bcount:
l = f.bcount
b = f.readbits(l)
x |= (b & 0xFF) << s
s += l
bits -= l
for _ in range(bits//8):
b = f.readbits(8)
x |= (b & 0xFF) << s
s += 8
r = bits % 8
if r != 0:
b = f.readbits(r, remained=True)
x |= (b & ((1 << r) - 1)) << s
s+=r
return x
def read(f, format):
type = format[0]
bits = format[1]
n = format[2]
r = []
for i in range(n):
if type == "uint":
r.append(readuint(f, bits=bits))
elif type == "int":
r.append(readint(f, bits=bits))
elif type == "bit":
r.append(readbits(f, bits=bits))
else:
raise Exception(f"Data type {type} is not supported.")
if len(r) == 1:
return r[0]
else:
return r
# Write
def writeuint(f, v, bits=64, signed=False):
assert bits % 8 == 0, "Not support"
if signed:
v += (1 << bits)
if bits == 8:
f.writebytes(v, 1)
return
s = 0
for _ in range(bits//8):
f.writebytes(v & 0xff, 1)
v = v >> 8
s += 8
def writeint(f, v, bits=64):
return writeuint(f, v, bits, signed=True)
def writebits(f, v, bits=8):
s = 0
if f.bcount % 8 != 0 and bits >= 8 - f.bcount:
l = 8 - f.bcount
f.writebits(v & ((1 << l) - 1), l)
v = v >> l
s += l
bits -= l
for _ in range(bits//8):
f.writebits(v & 0xff, 8)
v = v >> 8
s += 8
r = bits % 8
if r != 0:
f.writebits(v & ((1 << bits) - 1), r, remained=True)
v = v >> r
s+=r
def write(f, v, format):
t = format[0]
bits = format[1]
n = format[2]
if not isinstance(v, list):
v = [v]
for i in range(n):
if t == "uint":
writeuint(f, v[i], bits=bits)
elif t == "int":
writeint(f, v[i], bits=bits)
elif t == "bit":
writebits(f, v[i], bits=bits)
else:
raise Exception(f"Data type {t} is not supported.")
# Unpacking
def to_uint8(buf):
return buf[0]
def to_uint16(buf):
return unpack("<H", bytes(buf[:2]))[0]
def to_uint32(buf):
return unpack("<L", bytes(buf[:4]))[0]
def to_int8(buf):
return unpack("<b", bytes([buf[0]]))[0]
def to_int32(buf):
return unpack("<i", bytes(buf[:4]))[0]
def to_double(buf):
return unpack("<d", bytes(buf[:8]))[0]
# Packing
def from_uint8(val):
return [val]
def from_uint16(val):
return list(pack("<H", val))
def from_uint32(val):
return list(pack("<L", val))
def from_int8(val):
return list(pack("<b", val))
def from_int32(val):
return list(pack("<i", val))
def from_double(val):
return list(pack("<d", val))
# Buf Function
def memcpy(dest, src, start, length):
for i in range(length):
dest[start + i] = src[i]
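# Minimal usage sketch (illustrative, not part of the original module):
#
#   with open("out.bin", "wb") as f:
#       w = BitWriter(f)
#       write(w, 0x1234, ("uint", 16, 1))  # format tuple is (type, bits, count)
#
#   with open("out.bin", "rb") as f:
#       r = BitReader(f)
#       assert read(r, ("uint", 16, 1)) == 0x1234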
| 22.647059 | 129 | 0.492727 |
79450c3f70792fad3a01a8f2eb7542d10e0a1186 | 13,822 | py | Python | tensorflow/python/kernel_tests/pool_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 1 | 2018-11-15T08:44:10.000Z | 2018-11-15T08:44:10.000Z | tensorflow/python/kernel_tests/pool_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/pool_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 1 | 2020-07-20T18:02:33.000Z | 2020-07-20T18:02:33.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
def pool_direct_single_axis(input, # pylint: disable=redefined-builtin
axis,
window_size,
pooling_type,
padding,
dilation_rate,
stride):
"""Numpy implementation of pooling along a single axis.
This is intended for testing only, and therefore isn't particularly efficient.
See pool_direct below for the meaning of the arguments.
Args:
input: numpy array.
axis: axis along which to perform pooling.
window_size: int >= 1. Size of pooling window within axis.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: int >= 1. Dilation factor for window, i.e. stride at which
to sample input.
stride: int >= 1. Stride at which to generate output.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
effective_window_size = (window_size - 1) * dilation_rate + 1
input_size = input.shape[axis]
if padding == "SAME":
output_size = int(math.ceil(input_size / stride))
total_padding_amount = max(
0, (output_size - 1) * stride + effective_window_size - input_size)
before_padding = total_padding_amount // 2
elif padding == "VALID":
output_size = int(
math.ceil((input_size - effective_window_size + 1) / stride))
before_padding = 0
else:
raise ValueError("Unsupported padding type: %r" % (padding,))
output_shape = input.shape[:axis] + (output_size,) + input.shape[axis + 1:]
output = np.zeros(output_shape, input.dtype)
initial_dim_selector = tuple(np.s_[:] for _ in range(axis))
if pooling_type == "MAX":
pooling_func = np.max
elif pooling_type == "AVG":
pooling_func = np.mean
else:
raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
for output_pos in range(output_size):
input_start_pos = output_pos * stride - before_padding
input_end_pos = min(input_start_pos + effective_window_size, input_size)
if input_start_pos < 0:
input_start_pos += dilation_rate
input_slice = np.s_[input_start_pos:input_end_pos:dilation_rate]
output[initial_dim_selector + (output_pos,)] = pooling_func(
input[initial_dim_selector + (input_slice,)], axis=axis)
return output
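# Worked example of the size arithmetic above (illustrative): with
# input_size=10, window_size=3, dilation_rate=2 the effective window is
# (3 - 1) * 2 + 1 = 5, so "VALID" padding with stride=1 gives
# ceil((10 - 5 + 1) / 1) = 6 outputs, while "SAME" padding with stride=2
# gives ceil(10 / 2) = 5 outputs and max(0, (5 - 1) * 2 + 5 - 10) = 3 total
# padding (1 before, 2 after).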
def pool_direct(input, window_shape, pooling_type, padding, # pylint: disable=redefined-builtin
dilation_rate, strides, data_format=None):
"""Numpy implementation of pooling.
This is intended for testing only, and therefore isn't particularly efficient.
See tensorflow.nn.pool.
Args:
input: numpy array of rank N+2.
window_shape: Sequence of N ints >= 1.
pooling_type: either "MAX" or "AVG".
padding: either "SAME" or "VALID".
dilation_rate: Sequence of N ints >= 1.
strides: Sequence of N ints >= 1.
data_format: If specified and starts with "NC", indicates that second
dimension, rather than the last dimension, specifies the channel.
Returns:
pooling output array of rank N+2.
Raises:
ValueError: if arguments are invalid.
"""
if data_format is None or not data_format.startswith("NC"):
spatial_start_dim = 1
else:
spatial_start_dim = 2
output = input
for i in range(len(window_shape)):
output = pool_direct_single_axis(
input=output,
axis=i + spatial_start_dim,
window_size=window_shape[i],
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilation_rate[i],
stride=strides[i])
return output
class PoolingTest(tf.test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
# used.
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
y2 = tf.nn.pool(input=x, **kwargs)
self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
def testPool1D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 2], [2, 10, 2]]:
for window_shape in [[1], [2], [3]]:
if padding != "SAME":
for dilation_rate in [[1], [2], [3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2], [3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testPool2D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
for window_shape in [[1, 1], [2, 1], [2, 3]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testPool3D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if tf.test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = tf.constant(x_val, name="x", dtype=tf.float32)
output = tf.nn.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = tf.test.compute_gradient_error(
[x], [input_shape], output, y_shape, x_init_value=[x_val]
)
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
def testGradient1D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testGradient2D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testGradient3D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
if __name__ == "__main__":
tf.test.main()
| 38.288089 | 96 | 0.530459 |
79450cf78047cedeeeda15fc79dcc6f09934bbf2 | 1,737 | py | Python | Deploy-ML-Model-with-Apache/flask_demo/flask_predict_api.py | SuperBruceJia/pytorch-flask-deploy-webapp | 114f64c11b64fce378d488f90b2f9743e351e302 | [
"MIT"
] | 16 | 2020-09-10T09:40:56.000Z | 2021-12-28T12:56:25.000Z | Deploy-ML-Model-with-Apache/flask_demo/flask_predict_api.py | hexieshenghuo/pytorch-flask-deploy-webapp | a9484c22ad07f4fb7fa472c34344575e89493a77 | [
"MIT"
] | null | null | null | Deploy-ML-Model-with-Apache/flask_demo/flask_predict_api.py | hexieshenghuo/pytorch-flask-deploy-webapp | a9484c22ad07f4fb7fa472c34344575e89493a77 | [
"MIT"
] | 2 | 2020-12-29T01:44:49.000Z | 2021-01-04T09:38:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pickle
from flask import Flask, request
# pip install flasgger==0.8.1
from flasgger import Swagger
import numpy as np
import pandas as pd
with open('/var/www/flask_predict_api/rf.pkl', 'rb') as model_file:
model = pickle.load(model_file)
app = Flask(__name__)
swagger = Swagger(app)
@app.route('/predict', methods=["GET"])
def predict_iris():
"""Example endpoint returning a prediction of iris
---
parameters:
- name: s_length
in: query
type: number
required: true
- name: s_width
in: query
type: number
required: true
- name: p_length
in: query
type: number
required: true
- name: p_width
in: query
type: number
required: true
"""
s_length = request.args.get("s_length")
s_width = request.args.get("s_width")
p_length = request.args.get("p_length")
p_width = request.args.get("p_width")
prediction = model.predict(np.array([[s_length, s_width, p_length, p_width]]))
return str(prediction)
@app.route('/predict_file', methods=["POST"])
def predict_iris_file():
"""Example file endpoint returning a prediction of iris
---
parameters:
- name: input_file
in: formData
type: file
required: true
"""
input_data = pd.read_csv(request.files.get("input_file"), header=None)
prediction = model.predict(input_data)
return str(list(prediction))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
# flasgger -> swagger input
# http://127.0.0.1:5000/predict?s_length=5.7&s_width=5.6&p_length=4.3&p_width=7.8
# http://127.0.0.1:5000/apidocs/
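# Example upload for /predict_file (illustrative; use any header-less CSV
# with the four feature columns):
# curl -F "input_file=@iris_features.csv" http://127.0.0.1:5000/predict_file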
| 24.125 | 82 | 0.630973 |
79450d109d75327a1c617c572fbbba3b61d7b12b | 21,241 | py | Python | tests/test_config.py | melexis/warnings-plugin | 6f821f5c82c2cdf37988d28e651646e1452faf43 | [
"Apache-2.0"
] | 7 | 2017-04-21T12:05:23.000Z | 2019-12-12T13:37:07.000Z | tests/test_config.py | melexis/warnings-plugin | 6f821f5c82c2cdf37988d28e651646e1452faf43 | [
"Apache-2.0"
] | 100 | 2017-04-24T10:50:33.000Z | 2022-02-18T10:39:42.000Z | tests/test_config.py | melexis/warnings-plugin | 6f821f5c82c2cdf37988d28e651646e1452faf43 | [
"Apache-2.0"
] | 2 | 2017-05-23T11:50:59.000Z | 2019-11-06T09:26:33.000Z | from io import StringIO
from unittest import TestCase
from unittest.mock import patch
from mlx.junit_checker import JUnitChecker
from mlx.regex_checker import DoxyChecker, SphinxChecker, XMLRunnerChecker
from mlx.robot_checker import RobotChecker
from mlx.warnings import WarningsPlugin
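# The tests below exercise config parsing by passing dicts shaped like the
# JSON config files straight to config_parser_json(), e.g.
# {'sphinx': {'enabled': True, 'min': 0, 'max': 0}}.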
class TestConfig(TestCase):
def test_configfile_parsing(self):
warnings = WarningsPlugin(config_file="tests/test_in/config_example.json")
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
warnings.check('<testcase classname="dummy_class" name="dummy_name"><failure message="some random message from test case" /></testcase>')
self.assertEqual(warnings.return_count(), 0)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 1)
warnings.check('This should not be treated as warning2')
self.assertEqual(warnings.return_count(), 1)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 1)
def test_configfile_parsing_exclude(self):
warnings = WarningsPlugin(verbose=True, config_file="tests/test_in/config_example_exclude.json")
with patch('sys.stdout', new=StringIO()) as verbose_output:
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
warnings.check('<testcase classname="dummy_class" name="dummy_name"><failure message="some random message from test case" /></testcase>')
self.assertEqual(warnings.return_count(), 0)
deprecation_warning = 'sphinx/application.py:402: RemovedInSphinx20Warning: app.info() is now deprecated. Use sphinx.util.logging instead.'
warnings.check(deprecation_warning)
self.assertEqual(warnings.return_count(), 0)
toctree_warning = "/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'"
warnings.check(toctree_warning)
self.assertEqual(warnings.return_count(), 0) # ignored because of configured "exclude" regex
warnings.check("home/bljah/test/index.rst:5: WARNING: this warning should not get excluded")
self.assertEqual(warnings.return_count(), 1)
warnings.check('This should not be treated as warning2')
self.assertEqual(warnings.return_count(), 1)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 1)
excluded_toctree_warning = "Excluded {!r} because of configured regex {!r}".format(toctree_warning, "WARNING: toctree")
self.assertIn(excluded_toctree_warning, verbose_output.getvalue())
warning_echo = "home/bljah/test/index.rst:5: WARNING: this warning should not get excluded"
self.assertIn(warning_echo, verbose_output.getvalue())
def test_configfile_parsing_include_priority(self):
warnings = WarningsPlugin(verbose=True, config_file="tests/test_in/config_example_exclude.json")
warnings.get_checker('sphinx').include_sphinx_deprecation()
deprecation_warning = 'sphinx/application.py:402: RemovedInSphinx20Warning: app.info() is now deprecated. Use sphinx.util.logging instead.'
warnings.check(deprecation_warning)
self.assertEqual(warnings.return_count(), 1)
def test_partial_sphinx_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 0)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 0)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 1)
def test_partial_doxygen_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'doxygen': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 0)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 0)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 0)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 1)
def test_partial_junit_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'junit': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 0)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 0)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 1)
def test_exclude_feature_type_error(self):
warnings = WarningsPlugin()
tmpjson = {
'junit': {
'enabled': True,
'min': 0,
'max': 0,
"exclude": "able to trace this random failure msg"
}
}
with self.assertRaises(TypeError) as c_m:
warnings.config_parser_json(tmpjson)
self.assertEqual(str(c_m.exception), "Expected a list value for exclude key in configuration file; got str")
def test_partial_junit_config_parsing_exclude_regex(self):
warnings = WarningsPlugin()
tmpjson = {
'junit': {
'enabled': True,
'min': 0,
'max': 0,
"exclude": ["able to trace this random failure msg"]
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 0)
def test_partial_robot_config_parsing_exclude_regex(self):
warnings = WarningsPlugin(verbose=True)
tmpjson = {
'robot': {
'enabled': True,
'suites': [
{
'name': 'Suite One',
'min': 0,
'max': 0,
"exclude": ["does not exist"] # excludes failure in suite
},
{
'name': 'Suite Two',
'min': 1,
'max': 1,
"exclude": ["does not exist"] # no match for failure in suite
}
]
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/robot_double_fail.xml', 'r') as xmlfile:
with patch('sys.stdout', new=StringIO()) as verbose_output:
warnings.check(xmlfile.read())
count = warnings.return_count()
self.assertEqual(count, 1)
self.assertEqual(warnings.return_check_limits(), 0)
self.assertEqual(
'\n'.join([
r"Excluded 'Directory 'C:\\nonexistent' does not exist.' because of configured regex 'does not exist'",
"Suite One & Suite Two.Suite Two.Another test",
"Suite 'Suite One': 0 warnings found",
"Suite 'Suite Two': 1 warnings found",
]) + '\n',
verbose_output.getvalue()
)
def test_partial_robot_config_empty_name(self):
warnings = WarningsPlugin(verbose=True)
tmpjson = {
'robot': {
'enabled': True,
'suites': [
{
'name': '',
'min': 1,
'max': 1,
"exclude": ["does not exist"] # excludes 1 out of 2 failures in suites
}
]
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/robot_double_fail.xml', 'r') as xmlfile:
with patch('sys.stdout', new=StringIO()) as verbose_output:
warnings.check(xmlfile.read())
count = warnings.return_count()
self.assertEqual(count, 1)
self.assertEqual(warnings.return_check_limits(), 0)
self.assertEqual(
'\n'.join([
r"Excluded 'Directory 'C:\\nonexistent' does not exist.' because of configured regex 'does not exist'",
"Suite One & Suite Two.Suite Two.Another test",
"1 warnings found",
]) + '\n',
verbose_output.getvalue()
)
def test_partial_xmlrunner_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'xmlrunner': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 0)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 0)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
warnings.check('ERROR [0.000s]: test_some_error_test (something.anything.somewhere)')
self.assertEqual(warnings.return_count(), 1)
def test_doxy_junit_options_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'doxygen': {
'enabled': True,
'min': 0,
'max': 0
},
'junit': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 0)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 1)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 2)
def test_sphinx_doxy_config_parsing(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 0,
'max': 0
},
'doxygen': {
'enabled': True,
'min': 0,
'max': 0
}
}
warnings.config_parser_json(tmpjson)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 0)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 1)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 2)
with open('tests/test_in/junit_single_fail.xml', 'r') as xmlfile:
warnings.check(xmlfile.read())
self.assertEqual(warnings.return_count(), 2)
warnings.check("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(warnings.return_count(), 3)
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 4)
def test_sphinx_config_max(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 0,
'max': 5
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(SphinxChecker().name).get_maximum(), 5)
def test_doxygen_config_max(self):
warnings = WarningsPlugin()
tmpjson = {
'doxygen': {
'enabled': True,
'min': 0,
'max': 5
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(DoxyChecker().name).get_maximum(), 5)
def test_junit_config_max(self):
warnings = WarningsPlugin()
tmpjson = {
'junit': {
'enabled': True,
'min': 0,
'max': 5
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(JUnitChecker().name).get_maximum(), 5)
def test_xmlrunner_config_max(self):
warnings = WarningsPlugin()
tmpjson = {
'xmlrunner': {
'enabled': True,
'min': 0,
'max': 5
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(XMLRunnerChecker().name).get_maximum(), 5)
def test_all_config_max(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 0,
'max': 4
},
'doxygen': {
'enabled': True,
'min': 0,
'max': 5
},
'junit': {
'enabled': True,
'min': 0,
'max': 6
},
'xmlrunner': {
'enabled': True,
'min': 0,
'max': 6
},
'robot': {
'enabled': True,
'suites': [
{
'name': 'dummy1',
'min': 5,
'max': 7,
},
{
'name': 'dummy2',
'min': 1,
'max': 9,
},
{
'name': 'dummy3',
'min': 2,
'max': 2,
}
]
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(SphinxChecker().name).get_maximum(), 4)
self.assertEqual(warnings.get_checker(DoxyChecker().name).get_maximum(), 5)
self.assertEqual(warnings.get_checker(JUnitChecker().name).get_maximum(), 6)
self.assertEqual(warnings.get_checker(XMLRunnerChecker().name).get_maximum(), 6)
self.assertEqual(warnings.get_checker(RobotChecker().name).get_maximum(), 9)
def test_sphinx_config_min(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 5,
'max': 7
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(SphinxChecker().name).get_minimum(), 5)
def test_doxygen_config_min(self):
warnings = WarningsPlugin()
tmpjson = {
'doxygen': {
'enabled': True,
'min': 5,
'max': 7
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(DoxyChecker().name).get_minimum(), 5)
def test_junit_config_min(self):
warnings = WarningsPlugin()
tmpjson = {
'junit': {
'enabled': True,
'min': 5,
'max': 7
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(JUnitChecker().name).get_minimum(), 5)
def test_xmlrunner_config_min(self):
warnings = WarningsPlugin()
tmpjson = {
'xmlrunner': {
'enabled': True,
'min': 5,
'max': 7
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(XMLRunnerChecker().name).get_minimum(), 5)
def test_all_config_min(self):
warnings = WarningsPlugin()
tmpjson = {
'sphinx': {
'enabled': True,
'min': 4,
'max': 7
},
'doxygen': {
'enabled': True,
'min': 3,
'max': 7
},
'junit': {
'enabled': True,
'min': 5,
'max': 7
},
'xmlrunner': {
'enabled': True,
'min': 5,
'max': 7
},
'robot': {
'enabled': True,
'suites': [
{
'name': 'dummy1',
'min': 5,
'max': 7,
},
{
'name': 'dummy2',
'min': 1,
'max': 9,
},
{
'name': 'dummy3',
'min': 2,
'max': 2,
}
]
}
}
warnings.config_parser_json(tmpjson)
self.assertEqual(warnings.get_checker(SphinxChecker().name).get_minimum(), 4)
self.assertEqual(warnings.get_checker(DoxyChecker().name).get_minimum(), 3)
self.assertEqual(warnings.get_checker(JUnitChecker().name).get_minimum(), 5)
self.assertEqual(warnings.get_checker(XMLRunnerChecker().name).get_minimum(), 5)
self.assertEqual(warnings.get_checker(RobotChecker().name).get_minimum(), 1)
def test_invalid_config(self):
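# The 'dummy2' suite sets min=10 > max=9, which config_parser_json must reject with the ValueError checked below.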
warnings = WarningsPlugin()
tmpjson = {
'robot': {
'enabled': True,
'suites': [
{
'name': '',
'min': 5,
'max': 7,
},
{
'name': 'dummy2',
'min': 10,
'max': 9,
},
{
'name': 'dummy3',
'min': 2,
'max': 2,
}
]
}
}
with self.assertRaises(ValueError) as c_m:
warnings.config_parser_json(tmpjson)
self.assertEqual(str(c_m.exception),
'Invalid argument: minimum limit must be lower than maximum limit (9); cannot set 10.')
| 40.001883 | 160 | 0.543195 |
79450e1e45ec95062483bcf47dba8bfe3c3991f1 | 2,654 | py | Python | sql/sqlParser.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | 2 | 2019-10-23T08:27:30.000Z | 2019-10-23T09:58:45.000Z | sql/sqlParser.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | null | null | null | sql/sqlParser.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | null | null | null | from sql.sqlTable import SQLTable
from sql.sqlField import SQLField
from sql.language import *
from sql.sqlDB import SQLDB
from sql.sqlType import SQLType
import enum
class SQL_TOKEN_TYPE (enum.Enum):
OPERATOR = 1
DATATYPE = 2
FUNCTION = 3
PROPERTY = 4
COMMAND = 5  # distinct values: with enum.Enum, reusing 4 would silently make COMMAND and ENTITY aliases of PROPERTY
ENTITY = 6
class SQLToken:
def __init__(self, name, type):
self.name = name.lower()
self.type = type
SQL_KEYWORDS = [
SQLToken('CREATE', SQL_TOKEN_TYPE.COMMAND),
SQLToken('DROP', SQL_TOKEN_TYPE.COMMAND),
SQLToken('ENUM', SQL_TOKEN_TYPE.ENTITY),
SQLToken('TABLE', SQL_TOKEN_TYPE.ENTITY),
SQLToken('EXTENSION', SQL_TOKEN_TYPE.ENTITY),
SQLToken('NOT', SQL_TOKEN_TYPE.OPERATOR),
SQLToken('IF', SQL_TOKEN_TYPE.FUNCTION),
SQLToken('EXISTS', SQL_TOKEN_TYPE.FUNCTION),
SQLToken('MIN', SQL_TOKEN_TYPE.FUNCTION),
SQLToken('MAX', SQL_TOKEN_TYPE.FUNCTION),
SQLToken('NOW', SQL_TOKEN_TYPE.FUNCTION),
SQLToken('VARCHAR', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('SERIAL', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('BIGSERIAL', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('FLOAT', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('DOUBLE PRECISION', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('INTEGER', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('UUID', SQL_TOKEN_TYPE.DATATYPE),
SQLToken('NULL', SQL_TOKEN_TYPE.PROPERTY),
SQLToken('PRIMARY KEY', SQL_TOKEN_TYPE.PROPERTY),
SQLToken('ON UPDATE', SQL_TOKEN_TYPE.PROPERTY)
]
class SQLParse:
def __init__(self, name, query):
self.query = query
self.db = SQLDB(name)
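# Strip comments and annotations, split the script on ';' and register every CREATE TABLE/TYPE statement with the SQLDB model.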
query = escapeAnnotations(query)
queries = removeComments(query).split('\n')
# print(queries)
queries = '\n'.join(map(lambda token: (token+' ')[0:token.find("--")].strip(), queries))
queries = queries.split(';')
self.queries = map(lambda token: token.replace("\r\n","").replace("\n","").strip(), queries)
for q in queries:
entity = SQLParse.resolve(self.db, q)
if (isinstance(entity, SQLTable)):
self.db.addTable(entity)
if (isinstance(entity, SQLType)):
self.db.addType(entity)
@staticmethod
def create(db, query):
tokens = query.split(' ')
if (tokens[1].lower().strip() == 'table'):
return SQLTable(db, query)
if (tokens[1].lower().strip() == 'type'):
return SQLType(db, query)
@staticmethod
def resolve(db, query):
tokens = query.split(' ')
if 'create' in tokens[0].lower():
return SQLParse.create(db, query)
def getDB(self):
return self.db | 31.595238 | 100 | 0.636021 |
79450e920cb8757227e132e96359143773c0f1ab | 11,865 | py | Python | arcgispro/src/Import_bif2.py | fmaas90/GeologicToolbox | 4474e3d038388ab25256ba6d24154b4d1c1fcb3b | [
"Apache-2.0"
] | 5 | 2019-03-01T08:59:00.000Z | 2021-06-22T01:54:23.000Z | arcgispro/src/Import_bif2.py | fmaas90/GeologicToolbox | 4474e3d038388ab25256ba6d24154b4d1c1fcb3b | [
"Apache-2.0"
] | 5 | 2019-03-13T09:56:42.000Z | 2019-09-03T08:30:38.000Z | arcgispro/src/Import_bif2.py | fmaas90/GeologicToolbox | 4474e3d038388ab25256ba6d24154b4d1c1fcb3b | [
"Apache-2.0"
] | 7 | 2019-03-12T14:55:41.000Z | 2022-01-20T09:30:05.000Z | import arcpy
import os
import sys
from arcpy import env
Input_BIF2 = arcpy.GetParameterAsText(0)
Output_BIF2 = arcpy.GetParameterAsText(1)
Input_BIF2List = Input_BIF2.split(";")
Continuous_borehole = arcpy.GetParameter(2)
if Continuous_borehole == True:
out_path, out_name = Output_BIF2.split(".gdb\\")
out_path = out_path + ".gdb"
geometry_type = "POLYLINE"
template = ""
has_m = "DISABLED"
has_z = "ENABLED"
Output_BIF2_Line = arcpy.CreateFeatureclass_management(out_path, out_name+"_Continuous_borehole", geometry_type, template, has_m,
has_z)
out_path, out_name = Output_BIF2.split(".gdb\\")
out_path = out_path + ".gdb"
geometry_type = "POLYLINE"
template = ""
has_m = "DISABLED"
has_z = "ENABLED"
Output_BIF2_Splited_Line = arcpy.CreateFeatureclass_management(out_path, out_name,
geometry_type, template, has_m, has_z)
if Continuous_borehole == True:
fields= ["BLIDM", "Name", "Laenge", "Beginn_Kernstrecke", "Ansatzpunkt_Rechtswert", "Ansatzpunkt_Hochwert", "Ansatzpunkt_Hoehe",
"Nadelabweichung", "erster_Bohrtag", "letzter_Bohrtag", "Art_der_Bohrung", "Richtung_der_Bohrung", "Auftraggeber",
"Bohrverfahren", "Gemarkung_Oertlichkeit", "Status", "Neigung_am_Ansatzpunkt", "Richtung_am_Ansatzpunkt", "Bohranlage"]
for row in fields:
arcpy.AddField_management(Output_BIF2_Line,row ,"TEXT")
##################################################################################################################
array = arcpy.Array()
array2 = arcpy.Array()
for BIF2 in Input_BIF2List:
array2.removeAll()
BIF2 = BIF2.replace("'","")
###############################################################################################################
BIF2 = open(BIF2, "r", encoding = "utf-16")
X = 0
Y = 0
Z = 0
Schicht_Gestein_Code_List = list()
Auftraggeber_List = list()
Bohranlage_List = list()
for line in BIF2:
if Continuous_borehole == True:
if line.rsplit()[0] == "Bl.Verlauf.Punkt.Rechtswert:":
X = float(line.rsplit()[1])
if line.rsplit()[0] == "Bl.Verlauf.Punkt.Hochwert:":
Y = float(line.rsplit()[1])
if line.rsplit()[0] == "Bl.Verlauf.Punkt.Hoehe:":
Z = float(line.rsplit()[1])
array.add(arcpy.Point(X,Y,Z))
if line.rsplit()[0] == "Bl.BLIDM:":
BLIDM = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Name:":
Name = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Laenge:":
Laenge = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Beginn_Kernstrecke:":
Beginn_Kernstrecke = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Ansatzpunkt.Rechtswert:":
Ansatzpunkt_Rechtswert = float(line.split(' ', 1)[1])
if line.rsplit()[0] == "Bl.Beschreibung.Ansatzpunkt.Hochwert:":
Ansatzpunkt_Hochwert = float(line.split(' ', 1)[1])
if line.rsplit()[0] == "Bl.Beschreibung.Ansatzpunkt.Hoehe:":
Ansatzpunkt_Hoehe = float(line.split(' ', 1)[1])
array2.add(arcpy.Point(Ansatzpunkt_Rechtswert, Ansatzpunkt_Hochwert, Ansatzpunkt_Hoehe))
if line.rsplit()[0] == "Bl.Beschreibung.Nadelabweichung:":
Nadelabweichung = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.erster_Bohrtag:":
erster_Bohrtag = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.letzter_Bohrtag:":
letzter_Bohrtag = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Art_der_Bohrung:":
Art_der_Bohrung = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Richtung_der_Bohrung:":
Richtung_der_Bohrung = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Auftraggeber.Text:":
Auftraggeber = [line.split(' ', 1)[1]]
Auftraggeber = Auftraggeber[0]
Auftraggeber_List.append(Auftraggeber)
if line.rsplit()[0] == "Bl.Beschreibung.Bohrverfahren:":
Bohrverfahren = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Gemarkung/Oertlichkeit:":
Gemarkung_Oertlichkeit = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Status:":
Status = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Neigung_am_Ansatzpunkt:":
Neigung_am_Ansatzpunkt = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Richtung_am_Ansatzpunkt:":
Richtung_am_Ansatzpunkt = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Beschreibung.Bohranlage.Text:":
Bohranlage = [line.split(' ', 1)[1]]
Bohranlage = Bohranlage[0]
Bohranlage_List.append(Bohranlage)
####################################################################################################
if line.rsplit()[0] == "#Bl.Schicht.Schicht-ID:":
Schicht_Schicht_ID = int(line.split(' ', 1)[1])
if line.rsplit()[0] == "Bl.Schicht.Bohrmeter:":
Schicht_Bohrmeter = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Schicht.Maechtigkeit:":
Schicht_Maechtigkeit = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Schicht.Winkel_am_Kern:":
Schicht_Winkel_am_Kern = line.split(' ', 1)[1]
if line.rsplit()[0] == "Bl.Schicht.Gestein.Code:":
Schicht_Gestein_Code = [line.split(' ', 1)[1]]
Schicht_Gestein_Code = Schicht_Gestein_Code[0]
Schicht_Gestein_Code_List.append(Schicht_Gestein_Code)
if line.rsplit()[0] == "Bl.Schicht.Punkt.Rechtswert:":
Schicht_Punkt_Rechtswert = [line.split(' ', 1)[1]]
Schicht_Punkt_Rechtswert = float(Schicht_Punkt_Rechtswert[0])
if line.rsplit()[0] == "Bl.Schicht.Punkt.Hochwert:":
Schicht_Punkt_Hochwert = [line.split(' ', 1)[1]]
Schicht_Punkt_Hochwert = float(Schicht_Punkt_Hochwert[0])
if line.rsplit()[0] == "Bl.Schicht.Punkt.Hoehe:":
Schicht_Punkt_Hoehe = [line.split(' ', 1)[1]]
Schicht_Punkt_Hoehe = float(Schicht_Punkt_Hoehe[0])
if len(Auftraggeber_List) == 1:
Auftraggeber = ''.join(Auftraggeber_List)
else:
Auftraggeber = str(Auftraggeber_List).replace("\\n'", "").replace("['", "").replace("'", "").replace("]", "")
if len(Bohranlage_List) == 1:
Bohranlage = ''.join(Bohranlage_List)
else:
Bohranlage = str(Bohranlage_List).replace("\\n'", "").replace("['", "").replace("'", "").replace("]", "")
if len(Schicht_Gestein_Code_List) == 1:
Schicht_Gestein_Code = ''.join(Schicht_Gestein_Code_List)
else:
Schicht_Gestein_Code = str(Schicht_Gestein_Code_List).replace("\\n'", "").replace("['", "").replace("'", "").replace("]", "")
point = arcpy.Point(Schicht_Punkt_Rechtswert, Schicht_Punkt_Hochwert, Schicht_Punkt_Hoehe)
array2.add(point)
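# Each Schicht (layer) becomes a two-point 3D segment from the previously stored point to the current layer point.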
array3 = arcpy.Array()
array3.add(array2.getObject(Schicht_Schicht_ID-1))
array3.add(array2.getObject(Schicht_Schicht_ID))
polyline2 = arcpy.Polyline(array3,None,True,False)
array3.removeAll()
fields = ["BLIDM", "Name", "Laenge", "Beginn_Kernstrecke", "Ansatzpunkt_Rechtswert",
"Ansatzpunkt_Hochwert", "Ansatzpunkt_Hoehe",
"Nadelabweichung", "erster_Bohrtag", "letzter_Bohrtag", "Art_der_Bohrung",
"Richtung_der_Bohrung", "Auftraggeber",
"Bohrverfahren", "Gemarkung_Oertlichkeit", "Status", "Neigung_am_Ansatzpunkt",
"Richtung_am_Ansatzpunkt", "Bohranlage","Schicht_Schicht_ID", "Schicht_Bohrmeter",
"Schicht_Maechtigkeit", "Schicht_Winkel_am_Kern", "Schicht_Gestein_Code",
"Schicht_Punkt_Rechtswert", "Schicht_Punkt_Hochwert", "Schicht_Punkt_Hoehe"]
for row in fields:
arcpy.AddField_management(Output_BIF2_Splited_Line, row, "TEXT")
cursor = arcpy.da.InsertCursor(Output_BIF2_Splited_Line,
['SHAPE@', 'BLIDM', 'Name', 'Laenge', 'Beginn_Kernstrecke',
'Ansatzpunkt_Rechtswert',
'Ansatzpunkt_Hochwert', 'Ansatzpunkt_Hoehe', 'Nadelabweichung',
'erster_Bohrtag',
'letzter_Bohrtag', 'Art_der_Bohrung', 'Richtung_der_Bohrung',
'Auftraggeber',
'Bohrverfahren', 'Gemarkung_Oertlichkeit', 'Status',
'Neigung_am_Ansatzpunkt',
'Richtung_am_Ansatzpunkt', 'Bohranlage','Schicht_Schicht_ID',
'Schicht_Bohrmeter', 'Schicht_Maechtigkeit', 'Schicht_Winkel_am_Kern',
'Schicht_Gestein_Code', 'Schicht_Punkt_Rechtswert', 'Schicht_Punkt_Hochwert',
'Schicht_Punkt_Hoehe'])
cursor.insertRow([polyline2, BLIDM, Name, Laenge, Beginn_Kernstrecke, Ansatzpunkt_Rechtswert,
Ansatzpunkt_Hochwert, Ansatzpunkt_Hoehe, Nadelabweichung, erster_Bohrtag,
letzter_Bohrtag, Art_der_Bohrung, Richtung_der_Bohrung, Auftraggeber,
Bohrverfahren, Gemarkung_Oertlichkeit, Status, Neigung_am_Ansatzpunkt,
Richtung_am_Ansatzpunkt, Bohranlage, Schicht_Schicht_ID, Schicht_Bohrmeter,
Schicht_Maechtigkeit, Schicht_Winkel_am_Kern, Schicht_Gestein_Code,
Schicht_Punkt_Rechtswert, Schicht_Punkt_Hochwert, Schicht_Punkt_Hoehe])
del cursor
Schicht_Gestein_Code_List.clear()
if Continuous_borehole == True:
polyline = arcpy.Polyline(array,None,True,False)
cursor = arcpy.da.InsertCursor(Output_BIF2_Line,
['SHAPE@', 'BLIDM', 'Name', 'Laenge', 'Beginn_Kernstrecke', 'Ansatzpunkt_Rechtswert',
'Ansatzpunkt_Hochwert', 'Ansatzpunkt_Hoehe', 'Nadelabweichung', 'erster_Bohrtag',
'letzter_Bohrtag', 'Art_der_Bohrung', 'Richtung_der_Bohrung', 'Auftraggeber',
'Bohrverfahren', 'Gemarkung_Oertlichkeit', 'Status', 'Neigung_am_Ansatzpunkt',
'Richtung_am_Ansatzpunkt', 'Bohranlage'])
cursor.insertRow([polyline, BLIDM, Name, Laenge, Beginn_Kernstrecke, Ansatzpunkt_Rechtswert,
Ansatzpunkt_Hochwert, Ansatzpunkt_Hoehe, Nadelabweichung, erster_Bohrtag,
letzter_Bohrtag, Art_der_Bohrung, Richtung_der_Bohrung, Auftraggeber,
Bohrverfahren, Gemarkung_Oertlichkeit, Status, Neigung_am_Ansatzpunkt,
Richtung_am_Ansatzpunkt, Bohranlage])
del cursor
| 45.988372 | 142 | 0.548841 |
79450ecf5f8a55a057d9e40d62c05344ef44c8ea | 2,687 | py | Python | model_2d/scripts/crocoddyl_controller.py | ChristofDubs/DoubleBallBalancer | 6869220ed9f8c5234b00fc653bf05bb7e0bf6737 | [
"Apache-2.0"
] | 3 | 2018-04-08T13:32:26.000Z | 2018-06-29T16:15:50.000Z | model_2d/scripts/crocoddyl_controller.py | ChristofDubs/DoubleBallBalancer | 6869220ed9f8c5234b00fc653bf05bb7e0bf6737 | [
"Apache-2.0"
] | null | null | null | model_2d/scripts/crocoddyl_controller.py | ChristofDubs/DoubleBallBalancer | 6869220ed9f8c5234b00fc653bf05bb7e0bf6737 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:47:41.000Z | 2020-07-18T03:47:41.000Z | """Controller class for controlling 2D Double Ball Balancer
"""
import numpy as np
from model_2d.dynamics_2 import DynamicModel, StateIndex
import crocoddyl
class ActionModel(crocoddyl.ActionModelAbstract):
def __init__(self, param):
crocoddyl.ActionModelAbstract.__init__(self, crocoddyl.StateVector(7), 1, 7) # nu = 1; nr = 7
self.unone = np.zeros(self.nu)
self.des = np.zeros(self.nr)
self.u_des = np.zeros(self.nu)
self.param = param
self.costWeightsState = [10, 0, 0, 4, 8, 4]
self.costWeightsInput = [10]
self.model = DynamicModel(param, np.zeros(StateIndex.NUM_STATES))
self.dt = 0.05
def calc(self, data, x, u=None):
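# One shooting node: unrecoverable states get NaN cost/residuals; otherwise the appended command state integrates u, the balancer state advances one explicit Euler step of length dt, and the residuals penalise state error and input.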
if u is None:
u = self.unone
if not self.model.is_recoverable(x=x[:-1], omega_cmd=u):
data.xnext[:] = x * np.nan
data.cost = np.nan
data.r = np.ones(self.nr) * np.nan
else:
data.xnext[-1] = x[-1] + u
data.xnext[:-1] = x[:-1] + self.model._x_dot(x[:-1], 0, data.xnext[-1]) * self.dt
data.r[:-1] = self.costWeightsState * (data.xnext[:-1] - self.des)
data.r[-1] = self.costWeightsInput[0] * u
data.cost = .5 * sum(data.r**2)
return data.xnext, data.cost
def setSetpoint(self, x, mode):
self.des = x
if mode == StateIndex.ALPHA_DOT_1_IDX:
self.costWeightsState[0] = 0
class Controller:
ANGLE_MODE = StateIndex.ALPHA_1_IDX
VELOCITY_MODE = StateIndex.ALPHA_DOT_1_IDX
def __init__(self, param):
model = ActionModel(param)
self.pred_model = crocoddyl.ActionModelNumDiff(model, True)
self.terminal_model = ActionModel(param)
self.terminal_model.costWeightsState = [5 * x for x in self.terminal_model.costWeightsState]
self.terminal_model = crocoddyl.ActionModelNumDiff(self.terminal_model, True)
def compute_ctrl_input(self, x0, r, mode=ANGLE_MODE):
des = np.zeros(np.shape(x0))
des[mode] = r
self.pred_model.model.setSetpoint(des, mode)
self.terminal_model.model.setSetpoint(des, mode)
model = self.pred_model
T = int(20 / 0.05) # number of knots
problem = crocoddyl.ShootingProblem(np.concatenate([x0, np.array([0])]), [model] * T, self.terminal_model)
# Creating the DDP solver for this OC problem, defining a logger
ddp = crocoddyl.SolverDDP(problem)
ddp.setCallbacks([crocoddyl.CallbackLogger(), crocoddyl.CallbackVerbose()])
# Solving it with the DDP algorithm
ddp.solve([], [], 5)
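# ddp.us are increments on the commanded wheel speed (the command state integrates u), so their cumulative sum is the absolute command sequence; the appended command state is dropped from the returned trajectory.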
return np.cumsum(ddp.us), np.array(ddp.xs)[:, :-1]
| 35.826667 | 114 | 0.621139 |
79450f60f23adb803dcbc50a5720e0437c3a0a68 | 2,528 | py | Python | tests/run/for_in_iter.py | minrk/cython | 9422a4b11d9410574a02112c0b54da2917501c9b | [
"Apache-2.0"
] | 2 | 2021-08-20T02:33:24.000Z | 2021-11-17T10:54:00.000Z | tests/run/for_in_iter.py | felix-salfelder/cython | 5446ec2921d6247611978cd17c66e215421b20c4 | [
"Apache-2.0"
] | null | null | null | tests/run/for_in_iter.py | felix-salfelder/cython | 5446ec2921d6247611978cd17c66e215421b20c4 | [
"Apache-2.0"
] | null | null | null | # mode: run
# tag: forin
import sys
import cython
try:
from builtins import next
except ImportError:
def next(it):
return it.next()
def for_in_pyiter_pass(it):
"""
>>> it = Iterable(5)
>>> for_in_pyiter_pass(it)
>>> next(it)
Traceback (most recent call last):
StopIteration
"""
for item in it:
pass
def for_in_pyiter(it):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
l = []
for item in it:
l.append(item)
return l
def for_in_list():
"""
>>> for_in_pyiter([1,2,3,4,5])
[1, 2, 3, 4, 5]
"""
@cython.test_assert_path_exists('//TupleNode//IntNode')
@cython.test_fail_if_path_exists('//ListNode//IntNode')
def for_in_literal_list():
"""
>>> for_in_literal_list()
[1, 2, 3, 4]
"""
l = []
for i in [1,2,3,4]:
l.append(i)
return l
class Iterable(object):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
def __init__(self, N):
self.N = N
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i < self.N:
i = self.i
self.i += 1
return i
raise StopIteration
next = __next__
if sys.version_info[0] >= 3:
class NextReplacingIterable(object):
def __init__(self):
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i > 5:
raise StopIteration
self.i += 1
self.__next__ = self.next2
return 1
def next2(self):
self.__next__ = self.next3
return 2
def next3(self):
del self.__next__
raise StopIteration
else:
class NextReplacingIterable(object):
def __init__(self):
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i > 5:
raise StopIteration
self.i += 1
self.next = self.next2
return 1
def next2(self):
self.next = self.next3
return 2
def next3(self):
del self.next
raise StopIteration
def for_in_next_replacing_iter():
"""
>>> for_in_pyiter(NextReplacingIterable())
[1, 1, 1, 1, 1, 1]
"""
def for_in_gen(N):
"""
>>> for_in_pyiter(for_in_gen(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for i in range(N):
yield i
| 20.721311 | 55 | 0.506725 |
79450fc56a54ca195c443a859e6fcd59e7b707db | 1,552 | py | Python | LAB/05/0508_KNN.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | LAB/05/0508_KNN.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | LAB/05/0508_KNN.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 8 10:41:11 2018
@author: ktm
"""
from sklearn.datasets import load_breast_cancer
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
cancer = load_breast_cancer()
cancer
print(f"cancer.keys() :\n{cancer.keys()}")
print(f"cancer.data.shape :\n{cancer.data.shape}")
print(f"target : {cancer.target_names}")
cancer_tr, cancer_te, y_tr, y_te = train_test_split(
cancer.data,
cancer.target,
random_state = 777)
print("tr size : {0}{1}".format(cancer_tr.shape, y_tr.shape))
print(f"tr size : {cancer_tr.shape}{y_tr.shape}")
print(f"tr size : {cancer_te.shape}{y_te.shape}")
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 11)
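# Fit a k-NN classifier for each k in 1..10 and record training/test accuracy.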
for n in neighbors_settings:
clf = KNeighborsClassifier(n_neighbors=n)
clf.fit(cancer_tr, y_tr)
score_tr = clf.score(cancer_tr, y_tr)
score_test = clf.score(cancer_te, y_te)
training_accuracy.append(score_tr)
test_accuracy.append(score_test)
print(f"k : {n}")
print(f"accuarcy of train : {score_tr:.4f}")
print(f"accuracy of test : {socre_test:.4f}")
print(f"train set accuracy : \n{training_accuracy}")
print(f"test set accuracy : \n{test_accuracy}")
#help(plt.plot)
plt.plot(training_accuracy, test_accuracy, 'bo')
plt.plot(neighbors_settings, training_accuracy, label = 'Training Set')
plt.plot(neighbors_settings, test_accuracy, label = 'Test Set')
plt.legend()
| 28.218182 | 71 | 0.720361 |
7945100e4ede7631d6d23e9f11a34147861ad792 | 2,742 | py | Python | Vigenere.py | Dylan-W-Ray/Pypher | cb3624f41fd4b6971f57710569fec2c2a70bf2c6 | [
"MIT"
] | null | null | null | Vigenere.py | Dylan-W-Ray/Pypher | cb3624f41fd4b6971f57710569fec2c2a70bf2c6 | [
"MIT"
] | null | null | null | Vigenere.py | Dylan-W-Ray/Pypher | cb3624f41fd4b6971f57710569fec2c2a70bf2c6 | [
"MIT"
] | null | null | null | #English alphabet to be used in
#decipher/encipher calculations.
Alpha=['a','b','c','d','e','f','g',
'h','i','j','k','l','m','n',
'o','p','q','r','s','t','u',
'v','w','x','y','z']
def v_cipher(mode, text):
if(mode == "encipher"):
plainTxt = text
cipher = ""
key = input("Key:")
key = key.upper()
if(len(key) > len(plainTxt.strip())):
print("Error key is larger than the message")
exit()
keyIndex = 0
for c in plainTxt:
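# Encipher: shift each letter by its key letter value (A=0 .. Z=25) modulo 26; other characters are copied through without advancing the key index.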
if(ord(c) >= 65 and ord(c) <= 90):
m = ord(key[keyIndex]) - 65
k = ord(c) - 65
cipherCharNum = (m + k) % 26
cipherChar = Alpha[cipherCharNum].upper()
cipher += cipherChar
keyIndex += 1
keyIndex %= len(key)
elif(ord(c) >= 97 and ord(c) <= 122):
m = ord(key[keyIndex]) - 65
k = ord(c) - 97
cipherCharNum = (m + k) % 26
cipherChar = Alpha[cipherCharNum]
cipher += cipherChar
keyIndex += 1
keyIndex %= len(key)
else:
cipher += c
print("Cipher:", cipher)
elif(mode == "decipher"):
cipher = text
plainTxt = ""
key = input("Key:")
key = key.upper()
if(len(key) > len(cipher.strip())):
print("Error key is larger than the cipher")
exit()
keyIndex = 0
for c in cipher:
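# Decipher reverses the shift: subtract the key letter value and wrap with (26 + cNum - k) % 26 to recover the original letter.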
if(ord(c) >= 65 and ord(c) <= 90):
k = ord(key[keyIndex]) - 65
cNum = ord(c) - 65
plainCharNum =(26 + cNum - k) % 26
plainTxtChar = Alpha[plainCharNum].upper()
plainTxt += plainTxtChar
keyIndex += 1
keyIndex %= len(key)
elif(ord(c) >= 97 and ord(c) <= 122):
k=ord(key[keyIndex]) - 65
cNum=ord(c) - 97
plainCharNum = (26 + cNum - k ) %26
plainTxtChar=Alpha[plainCharNum]
plainTxt += plainTxtChar
keyIndex += 1
keyIndex %= len(key)
else:
plainTxt += c
print("Message:", plainTxt)
| 30.131868 | 59 | 0.357768 |
794510482991f9ea890d71b69737cce96ded2892 | 511 | py | Python | function.py | yhilpisch/function | 141a9ba815315da1641fadd3c9a98f71970fc391 | [
"MIT"
] | null | null | null | function.py | yhilpisch/function | 141a9ba815315da1641fadd3c9a98f71970fc391 | [
"MIT"
] | null | null | null | function.py | yhilpisch/function | 141a9ba815315da1641fadd3c9a98f71970fc391 | [
"MIT"
] | null | null | null | # coding: utf-8
#
# Simple Function
# (c) The Python Quants
#
def f(x):
''' Simple function to compute the square of a number.
Parameters
==========
x: float
input number
Returns
=======
y: float
(positive) output number
Raises
======
ValueError if x is neither int or float
'''
if type(x) not in [int, float]:
raise ValueError('Parameter must be integer or float.')
y = x * x # this line is changed
return y
| 17.62069 | 63 | 0.53816 |
79451056e07a172e19dcfc696b7cbe8e69c4953b | 1,345 | py | Python | app/crud/shortcuts.py | correaleyval/Telezon-S3 | 1a6de581c73f7b2391207bfd717f0dfc42de0223 | [
"MIT"
] | 12 | 2021-03-18T20:42:19.000Z | 2021-06-08T18:43:05.000Z | app/crud/shortcuts.py | luiscib3r/Telezon-S3 | 1a6de581c73f7b2391207bfd717f0dfc42de0223 | [
"MIT"
] | 1 | 2021-03-19T14:08:51.000Z | 2021-03-19T23:09:55.000Z | app/crud/shortcuts.py | luiscib3r/Telezon-S3 | 1a6de581c73f7b2391207bfd717f0dfc42de0223 | [
"MIT"
] | 1 | 2021-04-11T04:35:14.000Z | 2021-04-11T04:35:14.000Z | from typing import Optional
from fastapi import HTTPException
from motor.motor_asyncio import AsyncIOMotorClient
from pydantic.networks import EmailStr
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
from app.crud.bucket import crud_get_bucket_by_name
from app.crud.user import crud_get_user_by_username, crud_get_user_by_email
async def check_free_username_and_email(
conn: AsyncIOMotorClient, username: Optional[str] = None, email: Optional[EmailStr] = None
):
if username:
user_by_username = await crud_get_user_by_username(conn, username)
if user_by_username:
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
detail="User with this username already exists",
)
if email:
user_by_email = await crud_get_user_by_email(conn, email)
if user_by_email:
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
detail="User with this email already exists",
)
async def check_free_bucket_name(db: AsyncIOMotorClient, name: str):
bucket = await crud_get_bucket_by_name(db, name)
if bucket:
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
detail="Bucket with this name already exists",
)
| 34.487179 | 98 | 0.711524 |
7945113977cb64e2165c9c2d0e1339a5fcf175f3 | 5,066 | py | Python | build/PureCloudPlatformClientV2/models/edge_service_state_request.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/edge_service_state_request.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/edge_service_state_request.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class EdgeServiceStateRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
EdgeServiceStateRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'in_service': 'bool',
'call_draining_wait_time_seconds': 'int'
}
self.attribute_map = {
'in_service': 'inService',
'call_draining_wait_time_seconds': 'callDrainingWaitTimeSeconds'
}
self._in_service = None
self._call_draining_wait_time_seconds = None
@property
def in_service(self):
"""
Gets the in_service of this EdgeServiceStateRequest.
A boolean that sets the Edge in-service or out-of-service.
:return: The in_service of this EdgeServiceStateRequest.
:rtype: bool
"""
return self._in_service
@in_service.setter
def in_service(self, in_service):
"""
Sets the in_service of this EdgeServiceStateRequest.
A boolean that sets the Edge in-service or out-of-service.
:param in_service: The in_service of this EdgeServiceStateRequest.
:type: bool
"""
self._in_service = in_service
@property
def call_draining_wait_time_seconds(self):
"""
Gets the call_draining_wait_time_seconds of this EdgeServiceStateRequest.
The number of seconds to wait for call draining to complete before initiating the reboot. A value of 0 will prevent call draining and all calls will disconnect immediately.
:return: The call_draining_wait_time_seconds of this EdgeServiceStateRequest.
:rtype: int
"""
return self._call_draining_wait_time_seconds
@call_draining_wait_time_seconds.setter
def call_draining_wait_time_seconds(self, call_draining_wait_time_seconds):
"""
Sets the call_draining_wait_time_seconds of this EdgeServiceStateRequest.
The number of seconds to wait for call draining to complete before initiating the reboot. A value of 0 will prevent call draining and all calls will disconnect immediately.
:param call_draining_wait_time_seconds: The call_draining_wait_time_seconds of this EdgeServiceStateRequest.
:type: int
"""
self._call_draining_wait_time_seconds = call_draining_wait_time_seconds
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 32.267516 | 180 | 0.623569 |
7945113de3fcc5c4e00abc90e48ffd9ecb366a01 | 10,108 | py | Python | fltk/util/arguments.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | 1 | 2022-03-24T10:14:31.000Z | 2022-03-24T10:14:31.000Z | fltk/util/arguments.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | 2 | 2021-05-11T12:48:14.000Z | 2021-05-11T12:49:24.000Z | fltk/util/arguments.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | 1 | 2022-03-30T09:11:37.000Z | 2022-03-30T09:11:37.000Z | import torch.nn.functional as F
import torch
import json
# Setting the seed for Torch
import yaml
from fltk.nets import Cifar10CNN, FashionMNISTCNN, Cifar100ResNet, FashionMNISTResNet, Cifar10ResNet, Cifar100VGG
SEED = 1
torch.manual_seed(SEED)
class Arguments:
def __init__(self, logger):
self.logger = logger
self.batch_size = 10
self.test_batch_size = 1000
self.epochs = 1
self.lr = 0.001
self.momentum = 0.9
self.cuda = False
self.shuffle = False
self.log_interval = 10
self.kwargs = {}
self.contribution_measurement_round = 1
self.contribution_measurement_metric = 'Influence'
self.scheduler_step_size = 50
self.scheduler_gamma = 0.5
self.min_lr = 1e-10
self.round_worker_selection_strategy = None
self.round_worker_selection_strategy_kwargs = None
self.save_model = False
self.save_temp_model = False
self.save_epoch_interval = 1
self.save_model_path = "models"
self.epoch_save_start_suffix = "start"
self.epoch_save_end_suffix = "end"
self.get_poison_effort = 'half'
self.num_workers = 50
# self.num_poisoned_workers = 10
self.rank = 0
self.world_size = 0
self.data_sampler = None
self.distributed = False
self.available_nets = {
"Cifar100ResNet" : Cifar100ResNet,
"Cifar100VGG" : Cifar100VGG,
"Cifar10CNN" : Cifar10CNN,
"Cifar10ResNet" : Cifar10ResNet,
"FashionMNISTCNN" : FashionMNISTCNN,
"FashionMNISTResNet" : FashionMNISTResNet
}
self.net = None
self.set_net_by_name('Cifar10CNN')
# self.net = FashionMNISTCNN
# self.net = Cifar100ResNet
# self.net = FashionMNISTResNet
# self.net = Cifar10ResNet
# self.net = Cifar10ResNet
self.dataset_name = 'cifar10'
self.train_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/train_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/train_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/train_data_loader.pickle',
}
self.test_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/test_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/test_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/test_data_loader.pickle',
}
# self.train_data_loader_pickle_path = "data_loaders/cifar10/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/cifar10/test_data_loader.pickle"
# self.train_data_loader_pickle_path = "data_loaders/fashion-mnist/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/fashion-mnist/test_data_loader.pickle"
# self.train_data_loader_pickle_path = "data_loaders/cifar100/train_data_loader.pickle"
# self.test_data_loader_pickle_path = "data_loaders/cifar100/test_data_loader.pickle"
self.loss_function = torch.nn.CrossEntropyLoss
self.default_model_folder_path = "default_models"
self.data_path = "data"
def get_distributed(self):
return self.distributed
def get_rank(self):
return self.rank
def get_world_size(self):
return self.world_size
def set_sampler(self, sampler):
self.data_sampler = sampler
def get_sampler(self):
return self.data_sampler
def get_round_worker_selection_strategy(self):
return self.round_worker_selection_strategy
def get_round_worker_selection_strategy_kwargs(self):
return self.round_worker_selection_strategy_kwargs
def set_round_worker_selection_strategy_kwargs(self, kwargs):
self.round_worker_selection_strategy_kwargs = kwargs
def set_client_selection_strategy(self, strategy):
self.round_worker_selection_strategy = strategy
def get_data_path(self):
return self.data_path
def get_epoch_save_start_suffix(self):
return self.epoch_save_start_suffix
def get_epoch_save_end_suffix(self):
return self.epoch_save_end_suffix
def get_dataloader_list(self):
return list(self.train_data_loader_pickle_path.keys())
def get_nets_list(self):
return list(self.available_nets.keys())
def set_train_data_loader_pickle_path(self, path, name='cifar10'):
self.train_data_loader_pickle_path[name] = path
def get_train_data_loader_pickle_path(self):
return self.train_data_loader_pickle_path[self.dataset_name]
def set_test_data_loader_pickle_path(self, path, name='cifar10'):
self.test_data_loader_pickle_path[name] = path
def get_test_data_loader_pickle_path(self):
return self.test_data_loader_pickle_path[self.dataset_name]
def set_net_by_name(self, name: str):
self.net = self.available_nets[name]
# net_dict = {
# 'cifar10-cnn': Cifar10CNN,
# 'fashion-mnist-cnn': FashionMNISTCNN,
# 'cifar100-resnet': Cifar100ResNet,
# 'fashion-mnist-resnet': FashionMNISTResNet,
# 'cifar10-resnet': Cifar10ResNet,
# 'cifar100-vgg': Cifar100VGG,
# }
# self.net = net_dict[name]
def get_cuda(self):
return self.cuda
def get_scheduler_step_size(self):
return self.scheduler_step_size
def get_scheduler_gamma(self):
return self.scheduler_gamma
def get_min_lr(self):
return self.min_lr
def get_default_model_folder_path(self):
return self.default_model_folder_path
def get_num_epochs(self):
return self.epochs
def set_num_poisoned_workers(self, num_poisoned_workers):
self.num_poisoned_workers = num_poisoned_workers
def set_num_workers(self, num_workers):
self.num_workers = num_workers
def set_model_save_path(self, save_model_path):
self.save_model_path = save_model_path
def get_logger(self):
return self.logger
def get_loss_function(self):
return self.loss_function
def get_net(self):
return self.net
def get_num_workers(self):
return self.num_workers
def get_num_poisoned_workers(self):
return self.num_poisoned_workers
def get_poison_effort(self):
return self.get_poison_effort
def get_learning_rate(self):
return self.lr
def get_momentum(self):
return self.momentum
def get_shuffle(self):
return self.shuffle
def get_batch_size(self):
return self.batch_size
def get_test_batch_size(self):
return self.test_batch_size
def get_log_interval(self):
return self.log_interval
def get_save_model_folder_path(self):
return self.save_model_path
def get_learning_rate_from_epoch(self, epoch_idx):
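# Step decay: the base LR is multiplied by scheduler_gamma once every scheduler_step_size epochs and clamped below by min_lr.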
lr = self.lr * (self.scheduler_gamma ** int(epoch_idx / self.scheduler_step_size))
if lr < self.min_lr:
self.logger.warning("Updating LR would place it below min LR. Skipping LR update.")
return self.min_lr
self.logger.debug("LR: {}".format(lr))
return lr
def get_contribution_measurement_round(self):
return self.contribution_measurement_round
def get_contribution_measurement_metric(self):
return self.contribution_measurement_metric
def should_save_model(self, epoch_idx):
"""
Returns true/false models should be saved.
:param epoch_idx: current training epoch index
:type epoch_idx: int
"""
if not self.save_model:
return False
if epoch_idx == 1 or epoch_idx % self.save_epoch_interval == 0:
return True
def log(self):
"""
Log this arguments object to the logger.
"""
self.logger.debug("Arguments: {}", str(self))
def __str__(self):
return "\nBatch Size: {}\n".format(self.batch_size) + \
"Test Batch Size: {}\n".format(self.test_batch_size) + \
"Epochs: {}\n".format(self.epochs) + \
"Learning Rate: {}\n".format(self.lr) + \
"Momentum: {}\n".format(self.momentum) + \
"CUDA Enabled: {}\n".format(self.cuda) + \
"Shuffle Enabled: {}\n".format(self.shuffle) + \
"Log Interval: {}\n".format(self.log_interval) + \
"Scheduler Step Size: {}\n".format(self.scheduler_step_size) + \
"Scheduler Gamma: {}\n".format(self.scheduler_gamma) + \
"Scheduler Minimum Learning Rate: {}\n".format(self.min_lr) + \
"Client Selection Strategy: {}\n".format(self.round_worker_selection_strategy) + \
"Client Selection Strategy Arguments: {}\n".format(json.dumps(self.round_worker_selection_strategy_kwargs, indent=4, sort_keys=True)) + \
"Model Saving Enabled: {}\n".format(self.save_model) + \
"Model Saving Interval: {}\n".format(self.save_epoch_interval) + \
"Model Saving Path (Relative): {}\n".format(self.save_model_path) + \
"Epoch Save Start Prefix: {}\n".format(self.epoch_save_start_suffix) + \
"Epoch Save End Suffix: {}\n".format(self.epoch_save_end_suffix) + \
"Number of Clients: {}\n".format(self.num_workers) + \
"Number of Poisoned Clients: {}\n".format(self.num_poisoned_workers) + \
"NN: {}\n".format(self.net) + \
"Train Data Loader Path: {}\n".format(self.train_data_loader_pickle_path) + \
"Test Data Loader Path: {}\n".format(self.test_data_loader_pickle_path) + \
"Loss Function: {}\n".format(self.loss_function) + \
"Default Model Folder Path: {}\n".format(self.default_model_folder_path) + \
"Data Path: {}\n".format(self.data_path) + \
"Dataset Name: {}\n".format(self.dataset_name) | 34.498294 | 152 | 0.653937 |
7945139c1176316867332232a351ef318cf97467 | 1,016 | py | Python | codes/contour2.py | cRyp70s/opencv-practice | 91b563164ebcc393e52a425c82574e5936b4259d | [
"BSD-3-Clause"
] | 1 | 2021-09-18T18:02:18.000Z | 2021-09-18T18:02:18.000Z | codes/contour2.py | cRyp70s/opencv-practice | 91b563164ebcc393e52a425c82574e5936b4259d | [
"BSD-3-Clause"
] | null | null | null | codes/contour2.py | cRyp70s/opencv-practice | 91b563164ebcc393e52a425c82574e5936b4259d | [
"BSD-3-Clause"
] | null | null | null | import cv2
import numpy as np
img1 = cv2.imread('0-a.png',0)
img2 = cv2.imread('0-b.png',0)
img3 = cv2.imread('0-c.png', 0)
img4 = cv2.imread('a-0.png', 0)
ret, thresh = cv2.threshold(img1, 127, 255,0)
ret, thresh2 = cv2.threshold(img2, 50, 255,0)
ret, thresh3 = cv2.threshold(img3, 127, 255,0)
ret, thresh4 = cv2.threshold(img4, 127, 255,0)
contours,hierarchy = cv2.findContours(thresh,2,1)
cnt1 = contours[0]
contours,hierarchy = cv2.findContours(thresh2,2,1)
cnt2 = contours[0]
contours,hierarchy = cv2.findContours(thresh3,2,1)
cnt3 = contours[0]
contours,hierarchy = cv2.findContours(thresh4,2,1)
cnt4 = contours[0]
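# cv2.matchShapes returns a Hu-moment based dissimilarity score: lower values mean more similar contours.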
ret = cv2.matchShapes(cnt1,cnt2,1,0.0)
print("0-a with 0-b",ret)
ret = cv2.matchShapes(cnt1,cnt3,1,0.0)
print("0-a with 0-c", ret)
ret = cv2.matchShapes(cnt2,cnt3,1,0.0)
print("0-b with 0-c",ret)
ret = cv2.matchShapes(cnt1,cnt4,1,0.0)
print("0-a with a-0",ret)
ret = cv2.matchShapes(cnt2,cnt4,1,0.0)
print("0-b with a-0",ret)
ret = cv2.matchShapes(cnt3,cnt4,1,0.0)
print("0-c with a-0",ret)
| 29.028571 | 50 | 0.697835 |
794513a741e3a4d621c7107de5fc65808370b943 | 154 | py | Python | test/unit/_2020/test_day4.py | Justintime50/adventofcode-2020 | c0d68e7b43c9cbc71dc5c19891c63489087124a6 | [
"MIT"
] | 2 | 2020-12-05T13:25:36.000Z | 2020-12-06T21:59:05.000Z | test/unit/_2020/test_day4.py | Justintime50/adventofcode-2020 | c0d68e7b43c9cbc71dc5c19891c63489087124a6 | [
"MIT"
] | 1 | 2021-12-06T08:06:37.000Z | 2021-12-28T21:45:23.000Z | test/unit/_2020/test_day4.py | Justintime50/adventofcode-2020 | c0d68e7b43c9cbc71dc5c19891c63489087124a6 | [
"MIT"
] | 1 | 2020-12-08T22:45:44.000Z | 2020-12-08T22:45:44.000Z | from adventofcode._2020.day4.challenge import main
def test_input():
answer1, answer2 = main()
assert answer1 == 208
assert answer2 == 167
| 17.111111 | 50 | 0.694805 |
794513d1a35f6377a090d106371adcf0c03580be | 1,371 | py | Python | setup.py | trifacta/nounType | 34478f75fa43e1bfd12bed1c9d624c1a673686d5 | [
"MIT"
] | null | null | null | setup.py | trifacta/nounType | 34478f75fa43e1bfd12bed1c9d624c1a673686d5 | [
"MIT"
] | null | null | null | setup.py | trifacta/nounType | 34478f75fa43e1bfd12bed1c9d624c1a673686d5 | [
"MIT"
] | null | null | null | try:
from setuptools import setup
except ImportError:
raise ImportError("setuptools module required, please go to https://pypi.python.org/pypi/setuptools and follow the instructions for installing setuptools")
setup(
version='0.1',
url='https://github.com/trifacta/nounType',
description='Parses Name entities using Conditional Random Fields',
name='properName',
packages=['properName'],
package_data={'properName': ['nouns_learned_settings.crfsuite']},
license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
install_requires=[
'python-crfsuite>=0.8',
'probableparsing',
'future>=0.14',
'doublemetaphone'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis']
)
| 40.323529 | 159 | 0.646244 |
79451583e04ba65d9bc692c2f6bd131623636a0d | 1,046 | py | Python | workload-account/setup.py | aws-samples/aws-continuous-compliance-for-terraform | 632ba08c05ffd28ff4dc47408d81edc806317e7b | [
"MIT-0"
] | 1 | 2022-01-18T10:45:31.000Z | 2022-01-18T10:45:31.000Z | workload-account/setup.py | aws-samples/aws-continuous-compliance-for-terraform | 632ba08c05ffd28ff4dc47408d81edc806317e7b | [
"MIT-0"
] | null | null | null | workload-account/setup.py | aws-samples/aws-continuous-compliance-for-terraform | 632ba08c05ffd28ff4dc47408d81edc806317e7b | [
"MIT-0"
] | 1 | 2022-03-26T17:06:57.000Z | 2022-03-26T17:06:57.000Z | import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="tools_account",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "tools_account"},
packages=setuptools.find_packages(where="tools_account"),
install_requires=[
"aws-cdk.core==1.59.0",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
| 22.73913 | 61 | 0.608031 |
794515c19bd23a51dee9146d1aba817b124ef40e | 1,151 | py | Python | tests/debug/test_debug_utils.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null | tests/debug/test_debug_utils.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null | tests/debug/test_debug_utils.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null | """Test some debug utilities that may be used in tests."""
import pytest
from myia.debug.utils import GraphIndex
from myia.pipeline import scalar_parse as parse
def test_GraphIndex():
@parse
def f(x, y):
a = x * y
b = x + y
c = a - b
return c
idx = GraphIndex(f)
assert idx['x'] is f.parameters[0]
assert idx['y'] is f.parameters[1]
assert idx['c'] is f.output
assert idx['a'] is f.output.inputs[1]
assert idx['b'] is f.output.inputs[2]
with pytest.raises(Exception):
idx['d']
def test_GraphIndex_multigraph():
def helper(x):
return x * x
@parse
def f(x, y):
def inner(a):
b = a - 1000
return b
a = inner(x) * helper(y)
return a
idx = GraphIndex(f)
assert idx.get_all('x') == {idx['f'].parameters[0],
idx['helper'].parameters[0]}
assert idx.get_all('y') == {idx['f'].parameters[1]}
assert idx.get_all('a') == {idx['f'].output,
idx['inner'].parameters[0]}
assert idx.get_all('b') == {idx['inner'].output}
| 20.553571 | 60 | 0.531712 |
794515f685594598407dbb3d18c20dcbe1c93362 | 1,215 | py | Python | setup.py | andriyor/chopper | cdb970a9afd9ea6d7e500bc44e1b423e1381981a | [
"MIT"
] | null | null | null | setup.py | andriyor/chopper | cdb970a9afd9ea6d7e500bc44e1b423e1381981a | [
"MIT"
] | null | null | null | setup.py | andriyor/chopper | cdb970a9afd9ea6d7e500bc44e1b423e1381981a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
from chopper import __version__
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='chopper',
version=__version__,
description="Lib to extract html elements by preserving ancestors and cleaning CSS",
long_description=long_description,
author='Jurismarches',
author_email='[email protected]',
url='https://github.com/jurismarches/chopper',
packages=[
'chopper',
'chopper.css',
'chopper.html',
],
install_requires=[
'cssselect>=0.9.1',
'tinycss>=0.3',
'lxml>=4.2.4',
'six>=1.11.0'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
],
test_suite='chopper.tests'
)
| 28.255814 | 88 | 0.595885 |
794516b4cec65b3b4fb85e66919ef8600e178d61 | 789 | py | Python | settings/test.py | de1o/loonflow | 8813461c789c55890fc30916fe56d04a2048d3f5 | [
"MIT"
] | 2 | 2019-08-19T13:47:35.000Z | 2019-08-20T01:46:34.000Z | settings/test.py | LezBaishi/loonflow | 6fa49deba5cb7e5733f427d4b50e9a89a93a667b | [
"MIT"
] | null | null | null | settings/test.py | LezBaishi/loonflow | 6fa49deba5cb7e5733f427d4b50e9a89a93a667b | [
"MIT"
] | 1 | 2022-02-11T13:16:01.000Z | 2022-02-11T13:16:01.000Z | from settings.common import *
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'loonflownew', # Or path to database file if using sqlite3.
'USER': 'loonflownew', # Not used with sqlite3.
'PASSWORD': '123456', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = '' | 35.863636 | 111 | 0.619772 |
794516c437297e1f2776c48e5572f4e3efba598c | 335,938 | py | Python | hydrus/client/gui/ClientGUI.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUI.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUI.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | null | null | null | import collections
import gc
import hashlib
import os
import random
import re
import ssl
import subprocess
import sys
import threading
import time
import traceback
import cv2
import PIL
import sqlite3
import qtpy
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusCompression
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusImageHandling
from hydrus.core import HydrusPaths
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusTemp
from hydrus.core import HydrusText
from hydrus.core import HydrusVideoHandling
from hydrus.core.networking import HydrusNetwork
from hydrus.core.networking import HydrusNetworking
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientExporting
from hydrus.client import ClientLocation
from hydrus.client import ClientParsing
from hydrus.client import ClientPaths
from hydrus.client import ClientRendering
from hydrus.client import ClientServices
from hydrus.client import ClientThreading
from hydrus.client.gui import ClientGUIAsync
from hydrus.client.gui import ClientGUICore as CGC
from hydrus.client.gui import ClientGUIDialogs
from hydrus.client.gui import ClientGUIDialogsManage
from hydrus.client.gui import ClientGUIDialogsQuick
from hydrus.client.gui import ClientGUIDownloaders
from hydrus.client.gui import ClientGUIDragDrop
from hydrus.client.gui import ClientGUIExport
from hydrus.client.gui import ClientGUIFrames
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import ClientGUIImport
from hydrus.client.gui import ClientGUILogin
from hydrus.client.gui import ClientGUIMediaControls
from hydrus.client.gui import ClientGUIMenus
from hydrus.client.gui import ClientGUIMPV
from hydrus.client.gui import ClientGUIParsing
from hydrus.client.gui import ClientGUIPopupMessages
from hydrus.client.gui import ClientGUIScrolledPanels
from hydrus.client.gui import ClientGUIScrolledPanelsEdit
from hydrus.client.gui import ClientGUIScrolledPanelsManagement
from hydrus.client.gui import ClientGUIScrolledPanelsReview
from hydrus.client.gui import ClientGUIShortcuts
from hydrus.client.gui import ClientGUIShortcutControls
from hydrus.client.gui import ClientGUISplash
from hydrus.client.gui import ClientGUIStyle
from hydrus.client.gui import ClientGUISubscriptions
from hydrus.client.gui import ClientGUISystemTray
from hydrus.client.gui import ClientGUITags
from hydrus.client.gui import ClientGUITime
from hydrus.client.gui import ClientGUITopLevelWindows
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.networking import ClientGUIHydrusNetwork
from hydrus.client.gui.networking import ClientGUINetwork
from hydrus.client.gui.pages import ClientGUIManagement
from hydrus.client.gui.pages import ClientGUIPages
from hydrus.client.gui.pages import ClientGUISession
from hydrus.client.gui.services import ClientGUIClientsideServices
from hydrus.client.gui.services import ClientGUIServersideServices
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.media import ClientMediaResult
from hydrus.client.metadata import ClientTags
MENU_ORDER = [ 'file', 'undo', 'pages', 'database', 'network', 'services', 'tags', 'pending', 'help' ]
def GetTagServiceKeyForMaintenance( win: QW.QWidget ):
tag_services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
choice_tuples = [ ( 'all services', None, 'Do it for everything. Can take a long time!' ) ]
for service in tag_services:
choice_tuples.append( ( service.GetName(), service.GetServiceKey(), service.GetName() ) )
return ClientGUIDialogsQuick.SelectFromListButtons( win, 'Which service?', choice_tuples )
def THREADUploadPending( service_key ):
finished_all_uploads = False
try:
service = HG.client_controller.services_manager.GetService( service_key )
service_name = service.GetName()
service_type = service.GetServiceType()
if service_type in HC.REPOSITORIES:
account = service.GetAccount()
if account.IsUnknown():
HydrusData.ShowText( 'Your account is currently unsynced, so the upload was cancelled. Please refresh the account under _review services_.' )
return
job_key = ClientThreading.JobKey( pausable = True, cancellable = True )
job_key.SetStatusTitle( 'uploading pending to ' + service_name )
nums_pending = HG.client_controller.Read( 'nums_pending' )
nums_pending_for_this_service = nums_pending[ service_key ]
content_types_for_this_service = set( HC.SERVICE_TYPES_TO_CONTENT_TYPES[ service_type ] )
if service_type in HC.REPOSITORIES:
paused_content_types = set()
unauthorised_content_types = set()
content_types_to_request = set()
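# Sort each pending content type into one of three buckets: upload now, skip because update processing is paused, or skip because the account lacks the needed permission.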
content_types_to_count_types_and_permissions = {
HC.CONTENT_TYPE_FILES : ( ( HC.SERVICE_INFO_NUM_PENDING_FILES, HC.PERMISSION_ACTION_CREATE ), ( HC.SERVICE_INFO_NUM_PETITIONED_FILES, HC.PERMISSION_ACTION_PETITION ) ),
HC.CONTENT_TYPE_MAPPINGS : ( ( HC.SERVICE_INFO_NUM_PENDING_MAPPINGS, HC.PERMISSION_ACTION_CREATE ), ( HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS, HC.PERMISSION_ACTION_PETITION ) ),
HC.CONTENT_TYPE_TAG_PARENTS : ( ( HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ), ( HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ) ),
HC.CONTENT_TYPE_TAG_SIBLINGS : ( ( HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ), ( HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ) )
}
for content_type in content_types_for_this_service:
for ( count_type, permission ) in content_types_to_count_types_and_permissions[ content_type ]:
if count_type not in nums_pending_for_this_service:
continue
num_pending = nums_pending_for_this_service[ count_type ]
if num_pending == 0:
continue
if account.HasPermission( content_type, permission ):
if service.IsPausedUpdateProcessing( content_type ):
paused_content_types.add( content_type )
else:
content_types_to_request.add( content_type )
else:
unauthorised_content_types.add( content_type )
if len( unauthorised_content_types ) > 0:
message = 'Unfortunately, your account ({}) does not have full permission to upload all your pending content of type ({})!'.format(
account.GetAccountType().GetTitle(),
', '.join( ( HC.content_type_string_lookup[ content_type ] for content_type in unauthorised_content_types ) )
)
message += os.linesep * 2
message += 'If you are currently using a public, read-only account (such as with the PTR), you may be able to generate your own private account with more permissions. Please hit the button below to open this service in _manage services_ and see if you can generate a new account. If accounts cannot be automatically created, you may have to contact the server owner directly to get this permission.'
message += os.linesep * 2
message += 'If you think your account does have this permission, try refreshing it under _review services_.'
unauthorised_job_key = ClientThreading.JobKey()
unauthorised_job_key.SetStatusTitle( 'some data was not uploaded!' )
unauthorised_job_key.SetVariable( 'popup_text_1', message )
if len( content_types_to_request ) > 0:
unauthorised_job_key.Delete( 120 )
call = HydrusData.Call( HG.client_controller.pub, 'open_manage_services_and_try_to_auto_create_account', service_key )
call.SetLabel( 'open manage services and check for auto-creatable accounts' )
unauthorised_job_key.SetUserCallable( call )
HG.client_controller.pub( 'message', unauthorised_job_key )
if len( paused_content_types ) > 0:
message = 'You have some pending content of type ({}), but processing for that is currently paused! No worries, but I won\'t upload the paused stuff. If you want to upload it, please unpause in _review services_ and then catch up processing.'.format(
', '.join( ( HC.content_type_string_lookup[ content_type ] for content_type in paused_content_types ) )
)
HydrusData.ShowText( message )
else:
content_types_to_request = content_types_for_this_service
if len( content_types_to_request ) == 0:
return
initial_num_pending = sum( nums_pending_for_this_service.values() )
num_to_do = initial_num_pending
result = HG.client_controller.Read( 'pending', service_key, content_types_to_request )
HG.client_controller.pub( 'message', job_key )
no_results_found = result is None
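# Main upload loop: each 'pending' read returns one batch of work (a file's media result, a
# client-to-server update, or an IPFS (hash, multihash) pair); upload it, apply the matching content
# updates locally, then read again until nothing pending remains.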
while result is not None:
nums_pending = HG.client_controller.Read( 'nums_pending' )
nums_pending_for_this_service = nums_pending[ service_key ]
remaining_num_pending = sum( nums_pending_for_this_service.values() )
# sometimes more come in while we are pending, -754/1,234 ha ha
num_to_do = max( num_to_do, remaining_num_pending )
num_done = num_to_do - remaining_num_pending
job_key.SetVariable( 'popup_text_1', 'uploading to ' + service_name + ': ' + HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
job_key.SetVariable( 'popup_gauge_1', ( num_done, num_to_do ) )
while job_key.IsPaused() or job_key.IsCancelled():
time.sleep( 0.1 )
if job_key.IsCancelled():
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.SetVariable( 'popup_text_1', 'cancelled' )
HydrusData.Print( job_key.ToString() )
job_key.Delete( 5 )
return
try:
if service_type in HC.REPOSITORIES:
if isinstance( result, ClientMediaResult.MediaResult ):
media_result = result
client_files_manager = HG.client_controller.client_files_manager
hash = media_result.GetHash()
mime = media_result.GetMime()
path = client_files_manager.GetFilePath( hash, mime )
with open( path, 'rb' ) as f:
file_bytes = f.read()
service.Request( HC.POST, 'file', { 'file' : file_bytes } )
file_info_manager = media_result.GetFileInfoManager()
timestamp = HydrusData.GetNow()
content_update_row = ( file_info_manager, timestamp )
content_updates = [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADD, content_update_row ) ]
else:
client_to_server_update = result
service.Request( HC.POST, 'update', { 'client_to_server_update' : client_to_server_update } )
content_updates = client_to_server_update.GetClientsideContentUpdates()
HG.client_controller.WriteSynchronous( 'content_updates', { service_key : content_updates } )
elif service_type == HC.IPFS:
if isinstance( result, ClientMediaResult.MediaResult ):
media_result = result
hash = media_result.GetHash()
mime = media_result.GetMime()
try:
service.PinFile( hash, mime )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'File {} could not be pinned!'.format( hash.hex() ) )
continue
else:
( hash, multihash ) = result
service.UnpinFile( hash, multihash )
except HydrusExceptions.ServerBusyException:
job_key.SetVariable( 'popup_text_1', service.GetName() + ' was busy. please try again in a few minutes' )
job_key.Cancel()
return
HG.client_controller.pub( 'notify_new_pending' )
time.sleep( 0.1 )
HG.client_controller.WaitUntilViewFree()
result = HG.client_controller.Read( 'pending', service_key, content_types_to_request )
finished_all_uploads = result is None
if initial_num_pending > 0 and no_results_found and service_type == HC.TAG_REPOSITORY:
HydrusData.ShowText( 'Hey, your pending menu may have a miscount! It seems like you have pending count, but nothing was found in the database. Please run _database->regenerate->tag storage mappings cache (just pending, instant calculation) when convenient. Make sure it is the "instant, just pending" regeneration!' )
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.SetVariable( 'popup_text_1', 'upload done!' )
HydrusData.Print( job_key.ToString() )
job_key.Finish()
if len( content_types_to_request ) == 0:
job_key.Delete()
else:
job_key.Delete( 5 )
except Exception as e:
r = re.search( '[a-fA-F0-9]{64}', str( e ) )
if r is not None:
possible_hash = bytes.fromhex( r.group() )
HydrusData.ShowText( 'Found a possible hash in that error message--trying to show it in a new page.' )
HG.client_controller.pub( 'imported_files_to_page', [ possible_hash ], 'files that did not upload right' )
job_key.SetVariable( 'popup_text_1', service.GetName() + ' error' )
job_key.Cancel()
raise
finally:
if finished_all_uploads:
if service_type == HC.TAG_REPOSITORY:
types_to_delete = (
HC.SERVICE_INFO_NUM_PENDING_MAPPINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS,
HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS
)
elif service_type in ( HC.FILE_REPOSITORY, HC.IPFS ):
types_to_delete = (
HC.SERVICE_INFO_NUM_PENDING_FILES,
HC.SERVICE_INFO_NUM_PETITIONED_FILES
)
HG.client_controller.Write( 'delete_service_info', service_key, types_to_delete )
HG.client_controller.pub( 'notify_pending_upload_finished', service_key )
class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ):
def __init__( self, controller ):
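# Main application window setup: status bar, top-level page notebook, drag-and-drop targets, popup
# message manager, pubsub subscriptions, menubar, repeating maintenance jobs and the system tray icon.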
self._controller = controller
ClientGUITopLevelWindows.MainFrameThatResizes.__init__( self, None, 'main', 'main_gui' )
self._currently_minimised_to_system_tray = False
bandwidth_width = ClientGUIFunctions.ConvertTextToPixelWidth( self, 17 )
idle_width = ClientGUIFunctions.ConvertTextToPixelWidth( self, 6 )
hydrus_busy_width = ClientGUIFunctions.ConvertTextToPixelWidth( self, 11 )
system_busy_width = ClientGUIFunctions.ConvertTextToPixelWidth( self, 13 )
db_width = ClientGUIFunctions.ConvertTextToPixelWidth( self, 14 )
self._statusbar = QP.StatusBar( [ -1, bandwidth_width, idle_width, hydrus_busy_width, system_busy_width, db_width ] )
self._statusbar.setSizeGripEnabled( True )
self.setStatusBar( self._statusbar )
self._statusbar_thread_updater = ClientGUIAsync.FastThreadToGUIUpdater( self._statusbar, self.RefreshStatusBar )
self._canvas_frames = [] # Keep references to canvas frames so they won't get garbage collected (canvas frames don't have a parent)
self._persistent_mpv_widgets = []
self._have_shown_session_size_warning = False
self._closed_pages = []
self._lock = threading.Lock()
self._delayed_dialog_lock = threading.Lock()
self._first_session_loaded = False
self._done_save_and_close = False
self._notebook = ClientGUIPages.PagesNotebook( self, self._controller, 'top page notebook' )
self._garbage_snapshot = collections.Counter()
self._currently_uploading_pending = set()
self._last_clipboard_watched_text = ''
self._clipboard_watcher_destination_page_watcher = None
self._clipboard_watcher_destination_page_urls = None
drop_target = ClientGUIDragDrop.FileDropTarget( self, self.ImportFiles, self.ImportURLFromDragAndDrop, self._notebook.MediaDragAndDropDropped )
self.installEventFilter( ClientGUIDragDrop.FileDropTarget( self, self.ImportFiles, self.ImportURLFromDragAndDrop, self._notebook.MediaDragAndDropDropped ) )
self._notebook.AddSupplementaryTabBarDropTarget( drop_target ) # ugly hack to make the case of files/media dropped onto a tab work
self._message_manager = ClientGUIPopupMessages.PopupMessageManager( self )
self._pending_modal_job_keys = set()
self._widget_event_filter = QP.WidgetEventFilter( self )
self._widget_event_filter.EVT_ICONIZE( self.EventIconize )
self._widget_event_filter.EVT_MOVE( self.EventMove )
self._last_move_pub = 0.0
self._controller.sub( self, 'AddModalMessage', 'modal_message' )
self._controller.sub( self, 'CreateNewSubscriptionGapDownloader', 'make_new_subscription_gap_downloader' )
self._controller.sub( self, 'DeleteOldClosedPages', 'delete_old_closed_pages' )
self._controller.sub( self, 'DoFileStorageRebalance', 'do_file_storage_rebalance' )
self._controller.sub( self, 'NewPageImportHDD', 'new_hdd_import' )
self._controller.sub( self, 'NewPageQuery', 'new_page_query' )
self._controller.sub( self, 'NotifyAdvancedMode', 'notify_advanced_mode' )
self._controller.sub( self, 'NotifyClosedPage', 'notify_closed_page' )
self._controller.sub( self, 'NotifyDeletedPage', 'notify_deleted_page' )
self._controller.sub( self, 'NotifyNewExportFolders', 'notify_new_export_folders' )
self._controller.sub( self, 'NotifyNewImportFolders', 'notify_new_import_folders' )
self._controller.sub( self, 'NotifyNewOptions', 'notify_new_options' )
self._controller.sub( self, 'NotifyNewPages', 'notify_new_pages' )
self._controller.sub( self, 'NotifyNewPending', 'notify_new_pending' )
self._controller.sub( self, 'NotifyNewPermissions', 'notify_new_permissions' )
self._controller.sub( self, 'NotifyNewPermissions', 'notify_account_sync_due' )
self._controller.sub( self, 'NotifyNewServices', 'notify_new_services_gui' )
self._controller.sub( self, 'NotifyNewSessions', 'notify_new_sessions' )
self._controller.sub( self, 'NotifyNewUndo', 'notify_new_undo' )
self._controller.sub( self, 'NotifyPendingUploadFinished', 'notify_pending_upload_finished' )
self._controller.sub( self, 'PresentImportedFilesToPage', 'imported_files_to_page' )
self._controller.sub( self, 'SetDBLockedStatus', 'db_locked_status' )
self._controller.sub( self, 'SetStatusBarDirty', 'set_status_bar_dirty' )
self._controller.sub( self, 'TryToOpenManageServicesForAutoAccountCreation', 'open_manage_services_and_try_to_auto_create_account' )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._notebook, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
self.setCentralWidget( QW.QWidget() )
self.centralWidget().setLayout( vbox )
ClientGUITopLevelWindows.SetInitialTLWSizeAndPosition( self, self._frame_key )
self._was_maximised = self.isMaximized()
self._InitialiseMenubar()
self._RefreshStatusBar()
self._bandwidth_repeating_job = self._controller.CallRepeatingQtSafe( self, 1.0, 1.0, 'repeating bandwidth status update', self.REPEATINGBandwidth )
self._page_update_repeating_job = self._controller.CallRepeatingQtSafe( self, 0.25, 0.25, 'repeating page update', self.REPEATINGPageUpdate )
self._clipboard_watcher_repeating_job = None
self._ui_update_repeating_job = None
self._ui_update_windows = set()
self._animation_update_timer = QC.QTimer( self )
self._animation_update_timer.setTimerType( QC.Qt.PreciseTimer )
self._animation_update_timer.timeout.connect( self.TIMEREventAnimationUpdate )
self._animation_update_windows = set()
self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'main_gui' ] )
self._system_tray_hidden_tlws = []
self._have_system_tray_icon = False
self._system_tray_icon = None
self._have_shown_once = False
if self._controller.new_options.GetBoolean( 'start_client_in_system_tray' ):
self._currently_minimised_to_system_tray = True
self.hide()
self._system_tray_hidden_tlws.append( ( self.isMaximized(), self ) )
else:
self.show()
self._have_shown_once = True
self._UpdateSystemTrayIcon( currently_booting = True )
self._notebook.freshSessionLoaded.connect( self.ReportFreshSessionLoaded )
self._controller.CallLaterQtSafe( self, 0.5, 'initialise session', self._InitialiseSession ) # do this in callafter as some pages want to talk to controller.gui, which doesn't exist yet!
ClientGUIFunctions.UpdateAppDisplayName()
def _AboutWindow( self ):
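# Builds the help->about dialog, collecting version/availability info for python, OpenSSL, OpenCV,
# Pillow, mpv, FFMPEG, sqlite, the Qt bindings and friends, plus a few db/runtime settings and locale info.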
aboutinfo = QP.AboutDialogInfo()
aboutinfo.SetName( 'hydrus client' )
aboutinfo.SetVersion( str( HC.SOFTWARE_VERSION ) + ', using network version ' + str( HC.NETWORK_VERSION ) )
library_versions = []
# 2.7.12 (v2.7.12:d33e0cf91556, Jun 27 2016, 15:24:40) [MSC v.1500 64 bit (AMD64)]
v = sys.version
if ' ' in v:
v = v.split( ' ' )[0]
library_versions.append( ( 'python', v ) )
library_versions.append( ( 'openssl', ssl.OPENSSL_VERSION ) )
from hydrus.core import HydrusEncryption
if HydrusEncryption.OPENSSL_OK:
library_versions.append( ( 'PyOpenSSL', 'available' ) )
else:
library_versions.append( ( 'PyOpenSSL', 'not available' ) )
library_versions.append( ( 'OpenCV', cv2.__version__ ) )
library_versions.append( ( 'Pillow', PIL.__version__ ) )
if HC.RUNNING_FROM_FROZEN_BUILD and HC.PLATFORM_MACOS:
library_versions.append( ( 'mpv: ', 'is not currently available on macOS' ) )
else:
if ClientGUIMPV.MPV_IS_AVAILABLE:
library_versions.append( ( 'mpv api version: ', ClientGUIMPV.GetClientAPIVersionString() ) )
else:
HydrusData.ShowText( 'If this information helps, MPV failed to import because:' )
HydrusData.ShowText( ClientGUIMPV.mpv_failed_reason )
library_versions.append( ( 'mpv', 'not available' ) )
library_versions.append( ( 'FFMPEG', HydrusVideoHandling.GetFFMPEGVersion() ) )
library_versions.append( ( 'sqlite', sqlite3.sqlite_version ) )
library_versions.append( ( 'Qt', QC.__version__ ) )
if qtpy.PYSIDE2:
import PySide2
import shiboken2
library_versions.append( ( 'PySide2', PySide2.__version__ ) )
library_versions.append( ( 'shiboken2', shiboken2.__version__ ) )
elif qtpy.PYQT5:
from PyQt5.Qt import PYQT_VERSION_STR # pylint: disable=E0401,E0611
from sip import SIP_VERSION_STR # pylint: disable=E0401
library_versions.append( ( 'PyQt5', PYQT_VERSION_STR ) )
library_versions.append( ( 'sip', SIP_VERSION_STR ) )
from hydrus.client.networking import ClientNetworkingJobs
if ClientNetworkingJobs.CLOUDSCRAPER_OK:
library_versions.append( ( 'cloudscraper', ClientNetworkingJobs.cloudscraper.__version__ ) )
else:
library_versions.append( ( 'cloudscraper present: ', 'False' ) )
library_versions.append( ( 'pyparsing present: ', str( ClientNetworkingJobs.PYPARSING_OK ) ) )
library_versions.append( ( 'html5lib present: ', str( ClientParsing.HTML5LIB_IS_OK ) ) )
library_versions.append( ( 'lxml present: ', str( ClientParsing.LXML_IS_OK ) ) )
library_versions.append( ( 'chardet present: ', str( HydrusText.CHARDET_OK ) ) )
library_versions.append( ( 'lz4 present: ', str( HydrusCompression.LZ4_OK ) ) )
library_versions.append( ( 'install dir', HC.BASE_DIR ) )
library_versions.append( ( 'db dir', HG.client_controller.db_dir ) )
library_versions.append( ( 'temp dir', HydrusTemp.GetCurrentTempDir() ) )
library_versions.append( ( 'db journal mode', HG.db_journal_mode ) )
library_versions.append( ( 'db cache size per file', '{}MB'.format( HG.db_cache_size ) ) )
library_versions.append( ( 'db transaction commit period', '{}'.format( HydrusData.TimeDeltaToPrettyTimeDelta( HG.db_transaction_commit_period ) ) ) ) # the original formatted HG.db_cache_size here, which looks like a copy-paste slip from the line above; assuming HG.db_transaction_commit_period is the intended value
library_versions.append( ( 'db synchronous value', str( HG.db_synchronous ) ) )
library_versions.append( ( 'db using memory for temp?', str( HG.no_db_temp_files ) ) )
import locale
l_string = locale.getlocale()[0]
qtl_string = QC.QLocale().name()
library_versions.append( ( 'locale strings', str( ( l_string, qtl_string ) ) ) )
description = 'This client is the media management application of the hydrus software suite.'
description += os.linesep * 2 + os.linesep.join( ( lib + ': ' + version for ( lib, version ) in library_versions ) )
aboutinfo.SetDescription( description )
if os.path.exists( HC.LICENSE_PATH ):
with open( HC.LICENSE_PATH, 'r', encoding = 'utf-8' ) as f:
license = f.read()
else:
license = 'no licence file found!'
aboutinfo.SetLicense( license )
aboutinfo.SetDevelopers( [ 'Anonymous' ] )
aboutinfo.SetWebSite( 'https://hydrusnetwork.github.io/hydrus/' )
QP.AboutBox( self, aboutinfo )
def _AnalyzeDatabase( self ):
message = 'This will gather statistical information on the database\'s indices, helping the query planner perform efficiently. It typically happens automatically every few days, but you can force it here. If you have a large database, it will take a few minutes, during which your gui may hang. A popup message will show its status.'
message += os.linesep * 2
message += 'A \'soft\' analyze will only reanalyze those indices that are due for a check in the normal db maintenance cycle. If nothing is due, it will return immediately.'
message += os.linesep * 2
message += 'A \'full\' analyze will force a run over every index in the database. This can take substantially longer. If you do not have a specific reason to select this, it is probably pointless.'
( result, was_cancelled ) = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose how thorough your analyze will be.', yes_label = 'soft', no_label = 'full', check_for_cancelled = True )
if was_cancelled:
return
if result == QW.QDialog.Accepted:
stop_time = HydrusData.GetNow() + 120
self._controller.Write( 'analyze', maintenance_mode = HC.MAINTENANCE_FORCED, stop_time = stop_time )
elif result == QW.QDialog.Rejected:
self._controller.Write( 'analyze', maintenance_mode = HC.MAINTENANCE_FORCED, force_reanalyze = True )
def _AutoRepoSetup( self ):
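# Offers to add the Public Tag Repository (PTR) using its well-known public read-only access key,
# after warning about the bandwidth/storage/processing cost and checking whether it is already set up.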
host = 'ptr.hydrus.network'
port = 45871
access_key = bytes.fromhex( '4a285629721ca442541ef2c15ea17d1f7f7578b0c3f4f5f2a05f8f0ab297786f' )
ptr_credentials = HydrusNetwork.Credentials( host = host, port = port, access_key = access_key )
def do_it():
all_services = list( self._controller.services_manager.GetServices() )
all_names = [ s.GetName() for s in all_services ]
name = HydrusData.GetNonDupeName( 'public tag repository', all_names )
service_key = HydrusData.GenerateKey()
service_type = HC.TAG_REPOSITORY
public_tag_repo = ClientServices.GenerateService( service_key, service_type, name )
public_tag_repo.SetCredentials( ptr_credentials )
all_services.append( public_tag_repo )
self._controller.SetServices( all_services )
message = 'PTR setup done! Check services->review services to see it.'
message += os.linesep * 2
message += 'The PTR has a lot of tags and will sync a little bit at a time when you are not using the client. Expect it to take a few weeks to sync fully.'
HydrusData.ShowText( message )
have_it_already = False
services = self._controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) )
for service in services:
credentials = service.GetCredentials()
if credentials.GetSerialisableTuple() == ptr_credentials.GetSerialisableTuple():
have_it_already = True
break
text = 'This will automatically set up your client with public shared \'read-only\' account for the Public Tag Repository, just as if you had added it manually under services->manage services.'
text += os.linesep * 2
text += 'Over the coming weeks, your client will download updates and then process them into your database in idle time, and the PTR\'s tags will increasingly appear across your files. If you decide to upload tags, it is just a couple of clicks (under services->manage services again) to generate your own account that has permission to do so.'
text += os.linesep * 2
text += 'Be aware that the PTR has been growing since 2011 and now has more than a billion mappings. As of 2021-06, it requires about 6GB of bandwidth and file storage, and your database itself will grow by 50GB! Processing also takes a lot of CPU and HDD work, and, due to the unavoidable mechanical latency of HDDs, will only work in reasonable time if your hydrus database is on an SSD.'
text += os.linesep * 2
text += '++++If you are on a mechanical HDD or will not be able to free up enough space on your SSD, cancel out now.++++'
if have_it_already:
text += os.linesep * 2
text += 'You seem to have the PTR already. If it is paused or desynchronised, this is best fixed under services->review services. Are you sure you want to add a duplicate?'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'not now' )
if result == QW.QDialog.Accepted:
self._controller.CallToThread( do_it )
def _BackupDatabase( self ):
path = self._new_options.GetNoneableString( 'backup_path' )
if path is None:
QW.QMessageBox.warning( self, 'Warning', 'No backup path is set!' )
return
if not os.path.exists( path ):
QW.QMessageBox.information( self, 'Information', 'The backup path does not exist--creating it now.' )
HydrusPaths.MakeSureDirectoryExists( path )
client_db_path = os.path.join( path, 'client.db' )
if os.path.exists( client_db_path ):
action = 'Update the existing'
else:
action = 'Create a new'
text = action + ' backup at "' + path + '"?'
text += os.linesep * 2
text += 'The database will be locked while the backup occurs, which may lock up your gui as well.'
result = ClientGUIDialogsQuick.GetYesNo( self, text )
if result == QW.QDialog.Accepted:
only_changed_page_data = True
about_to_save = True
session = self._notebook.GetCurrentGUISession( CC.LAST_SESSION_SESSION_NAME, only_changed_page_data, about_to_save )
session = self._FleshOutSessionWithCleanDataIfNeeded( self._notebook, CC.LAST_SESSION_SESSION_NAME, session )
self._controller.SaveGUISession( session )
session.SetName( CC.EXIT_SESSION_SESSION_NAME )
self._controller.SaveGUISession( session )
self._controller.Write( 'backup', path )
def _BackupServer( self, service_key ):
def do_it( service ):
started = HydrusData.GetNow()
service.Request( HC.POST, 'backup' )
HydrusData.ShowText( 'Server backup started!' )
time.sleep( 10 )
result_bytes = service.Request( HC.GET, 'busy' )
while result_bytes == b'1':
if HG.started_shutdown:
return
time.sleep( 10 )
result_bytes = service.Request( HC.GET, 'busy' )
it_took = HydrusData.GetNow() - started
HydrusData.ShowText( 'Server backup done in ' + HydrusData.TimeDeltaToPrettyTimeDelta( it_took ) + '!' )
message = 'This will tell the server to lock and copy its database files. It will probably take a few minutes to complete, during which time it will not be able to serve any requests.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
service = self._controller.services_manager.GetService( service_key )
self._controller.CallToThread( do_it, service )
def _BootOrStopClipboardWatcherIfNeeded( self ):
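# Starts the once-per-second clipboard watcher job when either clipboard-watching option is on, and
# cancels it again when clipboard watching is no longer wanted.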
allow_watchers = self._controller.new_options.GetBoolean( 'watch_clipboard_for_watcher_urls' )
allow_other_recognised_urls = self._controller.new_options.GetBoolean( 'watch_clipboard_for_other_recognised_urls' )
if allow_watchers or allow_other_recognised_urls:
if self._clipboard_watcher_repeating_job is None:
self._clipboard_watcher_repeating_job = self._controller.CallRepeatingQtSafe( self, 1.0, 1.0, 'repeating clipboard watcher', self.REPEATINGClipboardWatcher )
else:
if self._clipboard_watcher_destination_page_watcher is not None:
self._clipboard_watcher_repeating_job.Cancel()
self._clipboard_watcher_repeating_job = None
def _CheckDBIntegrity( self ):
message = 'This will check the SQLite database files for missing and invalid data. It may take several minutes to complete.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Run integrity check?', yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'db_integrity' )
def _CheckImportFolder( self, name = None ):
if self._controller.options[ 'pause_import_folders_sync' ]:
HydrusData.ShowText( 'Import folders are currently paused under the \'file\' menu. Please unpause them and try this again.' )
if name is None:
import_folders = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER )
else:
import_folder = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER, name )
import_folders = [ import_folder ]
for import_folder in import_folders:
import_folder.CheckNow()
self._controller.WriteSynchronous( 'serialisable', import_folder )
self._controller.pub( 'notify_new_import_folders' )
def _ClearFileViewingStats( self ):
text = 'Are you sure you want to delete _all_ file view count/duration and \'last time viewed\' records? This cannot be undone.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILE_VIEWING_STATS, HC.CONTENT_UPDATE_ADVANCED, 'clear' )
service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
self._controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
QW.QMessageBox.information( self, 'Information', 'Delete done! Please restart the client to see the changes in the UI.' )
def _ClearOrphanFiles( self ):
text = 'This will iterate through every file in your database\'s file storage, removing any it does not expect to be there. It may take some time.'
text += os.linesep * 2
text += 'Files and thumbnails will be inaccessible while this occurs, so it is best to leave the client alone until it is done.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
text = 'What would you like to do with the orphaned files? Note that all orphaned thumbnails will be deleted.'
client_files_manager = self._controller.client_files_manager
( result, was_cancelled ) = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Choose what do to with the orphans.', yes_label = 'move them somewhere', no_label = 'delete them', check_for_cancelled = True )
if was_cancelled:
return
if result == QW.QDialog.Accepted:
with QP.DirDialog( self, 'Select location.' ) as dlg_3:
if dlg_3.exec() == QW.QDialog.Accepted:
path = dlg_3.GetPath()
self._controller.CallToThread( client_files_manager.ClearOrphans, path )
elif result == QW.QDialog.Rejected:
self._controller.CallToThread( client_files_manager.ClearOrphans )
def _ClearOrphanFileRecords( self ):
text = 'DO NOT RUN THIS UNLESS YOU KNOW YOU NEED TO'
text += os.linesep * 2
text += 'This will instruct the database to review its file records and delete any orphans. You typically do not ever see these files and they are basically harmless, but they can offset some file counts confusingly. You probably only need to run this if you can\'t process the apparent last handful of duplicate filter pairs or hydrus dev otherwise told you to try it.'
text += os.linesep * 2
text += 'It will create a popup message while it works and inform you of the number of orphan records found.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'clear_orphan_file_records' )
def _ClearOrphanHashedSerialisables( self ):
text = 'DO NOT RUN THIS UNLESS YOU KNOW YOU NEED TO'
text += os.linesep * 2
text += 'This force-runs a routine that regularly removes some spare data from the database. You most likely do not need to run it.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
controller = self._controller
def do_it():
num_done = controller.WriteSynchronous( 'maintain_hashed_serialisables', force_start = True )
if num_done == 0:
message = 'No orphans found!'
else:
message = '{} orphans cleared!'.format( HydrusData.ToHumanInt( num_done ) )
HydrusData.ShowText( message )
HG.client_controller.CallToThread( do_it )
def _ClearOrphanTables( self ):
text = 'DO NOT RUN THIS UNLESS YOU KNOW YOU NEED TO'
text += os.linesep * 2
text += 'This will instruct the database to review its service tables and delete any orphans. This will typically do nothing, but hydrus dev may tell you to run this, just to check. Be sure you have a recent backup before you run this--if it deletes something important by accident, you will want to roll back!'
text += os.linesep * 2
text += 'It will create popups if it finds anything to delete.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'clear_orphan_tables' )
def _CullFileViewingStats( self ):
text = 'If your file viewing statistics have some erroneous values due to many short views or accidental long views, this routine will cull your current numbers to compensate. For instance:'
text += os.linesep * 2
text += 'If you have a file with 100 views over 100 seconds and a minimum view time of 2 seconds, this will cull the views to 50.'
text += os.linesep * 2
text += 'If you have a file with 10 views over 100000 seconds and a maximum view time of 60 seconds, this will cull the total viewtime to 600 seconds.'
text += os.linesep * 2
text += 'It will work for both preview and media views based on their separate rules.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.WriteSynchronous( 'cull_file_viewing_statistics' )
QW.QMessageBox.information( self, 'Information', 'Cull done! Please restart the client to see the changes in the UI.' )
def _CurrentlyMinimisedOrHidden( self ):
return self.isMinimized() or self._currently_minimised_to_system_tray
def _DebugFetchAURL( self ):
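# Debug helper: prompts for a URL, runs the request through the network engine on a background thread
# ( thread_wait ), then offers to save the response to a file or copy it to the clipboard ( qt_code ).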
def qt_code( network_job ):
if not self or not QP.isValid( self ):
return
content = network_job.GetContentBytes()
text = 'Request complete. Length of response is ' + HydrusData.ToHumanBytes( len( content ) ) + '.'
yes_tuples = []
yes_tuples.append( ( 'save to file', 'file' ) )
yes_tuples.append( ( 'copy to clipboard', 'clipboard' ) )
with ClientGUIDialogs.DialogYesYesNo( self, text, yes_tuples = yes_tuples, no_label = 'forget it' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
value = dlg.GetValue()
if value == 'file':
with QP.FileDialog( self, 'select where to save content', default_filename = 'result.html', acceptMode = QW.QFileDialog.AcceptSave, fileMode = QW.QFileDialog.AnyFile ) as f_dlg:
if f_dlg.exec() == QW.QDialog.Accepted:
path = f_dlg.GetPath()
with open( path, 'wb' ) as f:
f.write( content )
elif value == 'clipboard':
text = network_job.GetContentText()
self._controller.pub( 'clipboard', 'text', text )
def thread_wait( url ):
from hydrus.client.networking import ClientNetworkingJobs
network_job = ClientNetworkingJobs.NetworkJob( 'GET', url )
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'debug network job' )
job_key.SetNetworkJob( network_job )
self._controller.pub( 'message', job_key )
self._controller.network_engine.AddJob( network_job )
try:
network_job.WaitUntilDone()
finally:
job_key.Delete( seconds = 3 )
QP.CallAfter( qt_code, network_job )
with ClientGUIDialogs.DialogTextEntry( self, 'Enter the URL.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
url = dlg.GetValue()
self._controller.CallToThread( thread_wait, url )
def _DebugMakeDelayedModalPopup( self, cancellable ):
def do_it( controller, cancellable ):
time.sleep( 5 )
job_key = ClientThreading.JobKey( cancellable = cancellable )
job_key.SetStatusTitle( 'debug modal job' )
controller.pub( 'modal_message', job_key )
for i in range( 10 ):
if job_key.IsCancelled():
break
job_key.SetVariable( 'popup_text_1', 'Will auto-dismiss in ' + HydrusData.TimeDeltaToPrettyTimeDelta( 10 - i ) + '.' )
job_key.SetVariable( 'popup_gauge_1', ( i, 10 ) )
time.sleep( 1 )
job_key.Delete()
self._controller.CallToThread( do_it, self._controller, cancellable )
def _DebugLongTextPopup( self ):
words = [ 'test', 'a', 'longish', 'statictext', 'm8' ]
text = random.choice( words )
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', text )
self._controller.pub( 'message', job_key )
t = 0
for i in range( 2, 64 ):
text += ' {}'.format( random.choice( words ) )
t += 0.2
self._controller.CallLater( t, job_key.SetVariable, 'popup_text_1', text )
words = [ 'test', 'a', 'longish', 'statictext', 'm8' ]
text = random.choice( words )
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', 'test long title' )
self._controller.pub( 'message', job_key )
for i in range( 2, 64 ):
text += ' {}'.format( random.choice( words ) )
t += 0.2
self._controller.CallLater( t, job_key.SetStatusTitle, text )
def _DebugMakeParentlessTextCtrl( self ):
with QP.Dialog( None, title = 'parentless debug dialog' ) as dlg:
control = QW.QLineEdit( dlg )
control.setText( 'debug test input' )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, control, CC.FLAGS_EXPAND_BOTH_WAYS )
dlg.setLayout( vbox )
dlg.exec()
def _DebugMakeSomePopups( self ):
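# Debug helper: fires off a variety of popup styles (long text, user-callable buttons, unicode titles,
# gauges, a test exception, delayed messages) so the popup message manager can be eyeballed.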
for i in range( 1, 7 ):
HydrusData.ShowText( 'This is a test popup message -- ' + str( i ) )
brother_classem_pinniped = '''++++What the fuck did you just fucking say about me, you worthless heretic? I'll have you know I graduated top of my aspirant tournament in the Heralds of Ultramar, and I've led an endless crusade of secret raids against the forces of The Great Enemy, and I have over 30 million confirmed purgings. I am trained in armored warfare and I'm the top brother in all the thousand Divine Chapters of the Adeptus Astartes. You are nothing to me but just another heretic. I will wipe you the fuck out with precision the likes of which has never been seen before in this universe, mark my fucking words. You think you can get away with saying that shit to me over the Warp? Think again, traitor. As we speak I am contacting my secret network of inquisitors across the galaxy and your malign powers are being traced right now so you better prepare for the holy storm, maggot. The storm that wipes out the pathetic little thing you call your soul. You're fucking dead, kid. I can warp anywhere, anytime, and I can kill you in over seven hundred ways, and that's just with my bolter. Not only am I extensively trained in unarmed combat, but I have access to the entire arsenal of the Departmento Munitorum and I will use it to its full extent to wipe your miserable ass off the face of the galaxy, you little shit. If only you could have known what holy retribution your little "clever" comment was about to bring down upon you, maybe you would have held your fucking impure tongue. But you couldn't, you didn't, and now you're paying the price, you Emperor-damned heretic.++++\n\n++++Better crippled in body than corrupt in mind++++\n\n++++The Emperor Protects++++'''
HydrusData.ShowText( 'This is a very long message: \n\n' + brother_classem_pinniped )
#
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'This popup has a very long title -- it is a subscription that is running with a long "artist sub 123456" kind of name' )
job_key.SetVariable( 'popup_text_1', 'test' )
self._controller.pub( 'message', job_key )
#
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'user call test' )
job_key.SetVariable( 'popup_text_1', 'click the button m8' )
call = HydrusData.Call( HydrusData.ShowText, 'iv damke' )
call.SetLabel( 'cheeki breeki' )
job_key.SetUserCallable( call )
self._controller.pub( 'message', job_key )
#
service_keys = list( HG.client_controller.services_manager.GetServiceKeys( ( HC.TAG_REPOSITORY, ) ) )
if len( service_keys ) > 0:
service_key = service_keys[0]
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'auto-account creation test' )
call = HydrusData.Call( HG.client_controller.pub, 'open_manage_services_and_try_to_auto_create_account', service_key )
call.SetLabel( 'open manage services and check for auto-creatable accounts' )
job_key.SetUserCallable( call )
HG.client_controller.pub( 'message', job_key )
#
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'sub gap downloader test' )
file_import_options = HG.client_controller.new_options.GetDefaultFileImportOptions( 'quiet' )
from hydrus.client.importing.options import TagImportOptions
tag_import_options = TagImportOptions.TagImportOptions( is_default = True )
call = HydrusData.Call( HG.client_controller.pub, 'make_new_subscription_gap_downloader', ( b'', 'safebooru tag search' ), 'skirt', file_import_options, tag_import_options, 2 )
call.SetLabel( 'start a new downloader for this to fill in the gap!' )
job_key.SetUserCallable( call )
HG.client_controller.pub( 'message', job_key )
#
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( '\u24c9\u24d7\u24d8\u24e2 \u24d8\u24e2 \u24d0 \u24e3\u24d4\u24e2\u24e3 \u24e4\u24dd\u24d8\u24d2\u24de\u24d3\u24d4 \u24dc\u24d4\u24e2\u24e2\u24d0\u24d6\u24d4' )
job_key.SetVariable( 'popup_text_1', '\u24b2\u24a0\u24b2 \u24a7\u249c\u249f' )
job_key.SetVariable( 'popup_text_2', 'p\u0250\u05df \u028d\u01dd\u028d' )
self._controller.pub( 'message', job_key )
#
job_key = ClientThreading.JobKey( pausable = True, cancellable = True )
job_key.SetStatusTitle( 'test job' )
job_key.SetVariable( 'popup_text_1', 'Currently processing test job 5/8' )
job_key.SetVariable( 'popup_gauge_1', ( 5, 8 ) )
self._controller.pub( 'message', job_key )
self._controller.CallLater( 2.0, job_key.SetVariable, 'popup_text_2', 'Pulsing subjob' )
self._controller.CallLater( 2.0, job_key.SetVariable, 'popup_gauge_2', ( 0, None ) )
#
e = HydrusExceptions.DataMissing( 'This is a test exception' )
HydrusData.ShowException( e )
#
for i in range( 1, 4 ):
self._controller.CallLater( 0.5 * i, HydrusData.ShowText, 'This is a delayed popup message -- ' + str( i ) )
def _DebugResetColumnListManager( self ):
message = 'This will reset all saved column widths for all multi-column lists across the program. You may need to restart the client to see changes.'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return
self._controller.column_list_manager.ResetToDefaults()
def _DebugShowGarbageDifferences( self ):
count = collections.Counter()
for o in gc.get_objects():
count[ type( o ) ] += 1
count.subtract( self._garbage_snapshot )
text = 'Garbage differences start here:'
to_print = list( count.items() )
to_print.sort( key = lambda pair: -pair[1] )
for ( t, count ) in to_print:
if count == 0:
continue
text += os.linesep + '{}: {}'.format( t, HydrusData.ToHumanInt( count ) )
HydrusData.ShowText( text )
def _DebugTakeGarbageSnapshot( self ):
count = collections.Counter()
for o in gc.get_objects():
count[ type( o ) ] += 1
self._garbage_snapshot = count
def _DebugPrintGarbage( self ):
HydrusData.ShowText( 'Printing garbage to log' )
HydrusData.Print( 'uncollectable gc.garbage:' )
count = collections.Counter()
for o in gc.garbage:
count[ type( o ) ] += 1
to_print = list( count.items() )
to_print.sort( key = lambda pair: -pair[1] )
for ( k, v ) in to_print:
HydrusData.Print( ( k, v ) )
del gc.garbage[:]
old_debug = gc.get_debug()
HydrusData.Print( 'running a collect with stats on:' )
gc.set_debug( gc.DEBUG_LEAK | gc.DEBUG_STATS )
gc.collect()
del gc.garbage[:]
gc.set_debug( old_debug )
#
count = collections.Counter()
objects_to_inspect = set()
for o in gc.get_objects():
# add objects to inspect here
count[ type( o ) ] += 1
current_frame = sys._getframe( 0 )
for o in objects_to_inspect:
HydrusData.Print( o )
parents = gc.get_referrers( o )
for parent in parents:
if parent == current_frame or parent == objects_to_inspect:
continue
HydrusData.Print( 'parent {}'.format( parent ) )
grandparents = gc.get_referrers( parent )
for gp in grandparents:
if gp == current_frame or gp == parents:
continue
HydrusData.Print( 'grandparent {}'.format( gp ) )
HydrusData.Print( 'currently tracked types:' )
to_print = list( count.items() )
to_print.sort( key = lambda pair: -pair[1] )
for ( k, v ) in to_print:
if v > 15:
HydrusData.Print( ( k, v ) )
HydrusData.DebugPrint( 'garbage printing finished' )
def _DebugShowScheduledJobs( self ):
self._controller.DebugShowScheduledJobs()
def _DeleteGUISession( self, name ):
message = 'Delete session "' + name + '"?'
result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Delete session?' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER, name )
self._controller.pub( 'notify_new_sessions' )
def _DeletePending( self, service_key ):
service_name = self._controller.services_manager.GetName( service_key )
message = 'Are you sure you want to delete the pending data for {}?'.format( service_name )
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result == QW.QDialog.Accepted:
self._controller.Write( 'delete_pending', service_key )
def _DeleteServiceInfo( self, only_pending = False ):
if only_pending:
types_to_delete = (
HC.SERVICE_INFO_NUM_PENDING_MAPPINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS,
HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS,
HC.SERVICE_INFO_NUM_PENDING_FILES,
HC.SERVICE_INFO_NUM_PETITIONED_FILES
)
message = 'This will clear and regen the number for the pending menu up top. Due to unusual situations and little counting bugs, these numbers can sometimes become unsynced. It should not take long at all, and will update instantly if changed.'
else:
types_to_delete = None
message = 'This clears the cached counts for things like the number of files or tags on a service. Due to unusual situations and little counting bugs, these numbers can sometimes become unsynced. Clearing them forces an accurate recount from source.'
message += os.linesep * 2
message += 'Some GUI elements (review services, mainly) may be slow the next time they launch.'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result == QW.QDialog.Accepted:
self._controller.Write( 'delete_service_info', types_to_delete = types_to_delete )
def _DestroyPages( self, pages ):
for page in pages:
if page and QP.isValid( page ):
page.CleanBeforeDestroy()
page.deleteLater()
def _DestroyTimers( self ):
if self._animation_update_timer is not None:
self._animation_update_timer.stop()
self._animation_update_timer = None
def _EnableLoadTruncatedImages( self ):
result = HydrusImageHandling.EnableLoadTruncatedImages()
if not result:
QW.QMessageBox.critical( self, 'Error', 'Could not turn on--perhaps your version of PIL does not support it?' )
def _ExportDownloader( self ):
with ClientGUITopLevelWindowsPanels.DialogNullipotent( self, 'export downloaders' ) as dlg:
panel = ClientGUIParsing.DownloaderExportPanel( dlg, self._controller.network_engine )
dlg.SetPanel( panel )
dlg.exec()
def _FetchIP( self, service_key ):
with ClientGUIDialogs.DialogTextEntry( self, 'Enter the file\'s hash.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
hash = bytes.fromhex( dlg.GetValue() )
service = self._controller.services_manager.GetService( service_key )
with QP.BusyCursor(): response = service.Request( HC.GET, 'ip', { 'hash' : hash } )
ip = response[ 'ip' ]
timestamp = response[ 'timestamp' ]
gmt_time = HydrusData.ConvertTimestampToPrettyTime( timestamp, in_utc = True )
local_time = HydrusData.ConvertTimestampToPrettyTime( timestamp )
text = 'File Hash: ' + hash.hex()
text += os.linesep
text += 'Uploader\'s IP: ' + ip
text += os.linesep
text += 'Upload Time (GMT): ' + gmt_time
text += os.linesep
text += 'Upload Time (Your time): ' + local_time
HydrusData.Print( text )
QW.QMessageBox.information( self, 'Information', text+os.linesep*2+'This has been written to the log.' )
def _FindMenuBarIndex( self, name ):
for index in range( len( self._menubar.actions() ) ):
if self._menubar.actions()[ index ].property( 'hydrus_menubar_name' ) == name:
return index
return -1
def _FixLogicallyInconsistentMappings( self ):
message = 'This will check for tags that are occupying mutually exclusive states--either current & pending or deleted & petitioned.'
message += os.linesep * 2
message += 'Please run this if you attempt to upload some tags and get a related error. You may need some follow-up regeneration work to correct autocomplete or \'num pending\' counts.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'fix_logically_inconsistent_mappings', tag_service_key = tag_service_key )
def _FleshOutSessionWithCleanDataIfNeeded( self, notebook: ClientGUIPages.PagesNotebook, name: str, session: ClientGUISession.GUISessionContainer ):
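# If the db no longer holds the hashed page data this session relies on for its unchanged pages,
# regenerate the session with full page data so the upcoming save is self-contained.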
unchanged_page_data_hashes = session.GetUnchangedPageDataHashes()
have_hashed_serialised_objects = self._controller.Read( 'have_hashed_serialised_objects', unchanged_page_data_hashes )
if not have_hashed_serialised_objects:
only_changed_page_data = False
about_to_save = True
session = notebook.GetCurrentGUISession( name, only_changed_page_data, about_to_save )
return session
def _FlipClipboardWatcher( self, option_name ):
self._controller.new_options.FlipBoolean( option_name )
self._controller.WriteSynchronous( 'serialisable', self._controller.new_options )
self._last_clipboard_watched_text = ''
if self._clipboard_watcher_repeating_job is None:
self._BootOrStopClipboardWatcherIfNeeded()
def _FlipShowHideWholeUI( self ):
if not self._currently_minimised_to_system_tray:
visible_tlws = [ tlw for tlw in QW.QApplication.topLevelWidgets() if tlw.isVisible() or tlw.isMinimized() ]
visible_dialogs = [ tlw for tlw in visible_tlws if isinstance( tlw, QW.QDialog ) ]
if len( visible_dialogs ) > 0:
dialog = visible_dialogs[ -1 ]
dialog.activateWindow()
return
page = self.GetCurrentPage()
if page is not None:
page.PageHidden()
HG.client_controller.pub( 'pause_all_media' )
for tlw in visible_tlws:
tlw.hide()
self._system_tray_hidden_tlws.append( ( tlw.isMaximized(), tlw ) )
else:
for ( was_maximised, tlw ) in self._system_tray_hidden_tlws:
if QP.isValid( tlw ):
tlw.show()
self._have_shown_once = True
page = self.GetCurrentPage()
if page is not None:
page.PageShown()
self._system_tray_hidden_tlws = []
self.RestoreOrActivateWindow()
self._currently_minimised_to_system_tray = not self._currently_minimised_to_system_tray
self._UpdateSystemTrayIcon()
def _GenerateNewAccounts( self, service_key ):
with ClientGUIDialogs.DialogGenerateNewAccounts( self, service_key ) as dlg: dlg.exec()
def _HowBonedAmI( self ):
self._controller.file_viewing_stats_manager.Flush()
self._boned_updater.update()
def _ImportDownloaders( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'import downloaders' )
panel = ClientGUIScrolledPanelsReview.ReviewDownloaderImport( frame, self._controller.network_engine )
frame.SetPanel( panel )
def _ImportFiles( self, paths = None ):
if paths is None:
paths = []
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review files to import' )
panel = ClientGUIScrolledPanelsReview.ReviewLocalFileImports( frame, paths )
frame.SetPanel( panel )
def _ImportUpdateFiles( self ):
def do_it( external_update_dir ):
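# Walks the chosen directory, loads each file as a serialised hydrus update (definitions or content),
# and imports it so repository processing does not have to download that update later.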
num_errors = 0
filenames = os.listdir( external_update_dir )
update_paths = [ os.path.join( external_update_dir, filename ) for filename in filenames ]
update_paths = list( filter( os.path.isfile, update_paths ) )
num_to_do = len( update_paths )
if num_to_do == 0:
QP.CallAfter( QW.QMessageBox.warning, self, 'Warning', 'No files in that directory!' )
return
job_key = ClientThreading.JobKey( cancellable = True )
try:
job_key.SetStatusTitle( 'importing updates' )
HG.client_controller.pub( 'message', job_key )
for ( i, update_path ) in enumerate( update_paths ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
job_key.SetVariable( 'popup_text_1', 'Cancelled!' )
return
try:
with open( update_path, 'rb' ) as f:
update_network_bytes = f.read()
update_network_string_hash = hashlib.sha256( update_network_bytes ).digest()
try:
update = HydrusSerialisable.CreateFromNetworkBytes( update_network_bytes )
except:
num_errors += 1
HydrusData.Print( update_path + ' did not load correctly!' )
continue
if isinstance( update, HydrusNetwork.DefinitionsUpdate ):
mime = HC.APPLICATION_HYDRUS_UPDATE_DEFINITIONS
elif isinstance( update, HydrusNetwork.ContentUpdate ):
mime = HC.APPLICATION_HYDRUS_UPDATE_CONTENT
else:
num_errors += 1
HydrusData.Print( update_path + ' was not an update!' )
continue
self._controller.WriteSynchronous( 'import_update', update_network_bytes, update_network_string_hash, mime )
finally:
job_key.SetVariable( 'popup_text_1', HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
job_key.SetVariable( 'popup_gauge_1', ( i, num_to_do ) )
if num_errors == 0:
job_key.SetVariable( 'popup_text_1', 'Done!' )
else:
job_key.SetVariable( 'popup_text_1', 'Done with ' + HydrusData.ToHumanInt( num_errors ) + ' errors (written to the log).' )
finally:
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.Finish()
message = 'This lets you manually import a directory of update files for your repositories. Any update files that match what your repositories are looking for will be automatically linked so they do not have to be downloaded.'
QW.QMessageBox.information( self, 'Information', message )
with QP.DirDialog( self, 'Select location.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
path = dlg.GetPath()
self._controller.CallToThread( do_it, path )
def _ImportURL(
self,
url,
filterable_tags = None,
additional_service_keys_to_tags = None,
destination_page_name = None,
destination_page_key = None,
show_destination_page = True,
allow_watchers = True,
allow_other_recognised_urls = True,
allow_unrecognised_urls = True
):
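# Normalises the URL, checks that it can be parsed, then routes it to a url-import page or a multiple
# watcher page according to its type and the allow_* flags; returns ( url, human-readable message ) on
# success, or raises if the URL cannot be parsed or no destination page could be found/created.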
if filterable_tags is None:
filterable_tags = set()
if additional_service_keys_to_tags is None:
additional_service_keys_to_tags = ClientTags.ServiceKeysToTags()
url = HG.client_controller.network_engine.domain_manager.NormaliseURL( url )
( url_type, match_name, can_parse, cannot_parse_reason ) = self._controller.network_engine.domain_manager.GetURLParseCapability( url )
if url_type in ( HC.URL_TYPE_GALLERY, HC.URL_TYPE_POST, HC.URL_TYPE_WATCHABLE ) and not can_parse:
message = 'This URL was recognised as a "{}" but it cannot be parsed: {}'.format( match_name, cannot_parse_reason )
message += os.linesep * 2
message += 'Since this URL cannot be parsed, a downloader cannot be created for it! Please check your url class links under the \'networking\' menu.'
raise HydrusExceptions.URLClassException( message )
url_caught = False
if ( url_type == HC.URL_TYPE_UNKNOWN and allow_unrecognised_urls ) or ( url_type in ( HC.URL_TYPE_FILE, HC.URL_TYPE_POST, HC.URL_TYPE_GALLERY ) and allow_other_recognised_urls ):
url_caught = True
page = self._notebook.GetOrMakeURLImportPage( desired_page_name = destination_page_name, desired_page_key = destination_page_key, select_page = show_destination_page )
if page is not None:
if show_destination_page:
self._notebook.ShowPage( page )
management_panel = page.GetManagementPanel()
management_panel.PendURL( url, filterable_tags = filterable_tags, additional_service_keys_to_tags = additional_service_keys_to_tags )
return ( url, '"{}" URL added successfully.'.format( match_name ) )
elif url_type == HC.URL_TYPE_WATCHABLE and allow_watchers:
url_caught = True
page = self._notebook.GetOrMakeMultipleWatcherPage( desired_page_name = destination_page_name, desired_page_key = destination_page_key, select_page = show_destination_page )
if page is not None:
if show_destination_page:
self._notebook.ShowPage( page )
management_panel = page.GetManagementPanel()
management_panel.PendURL( url, filterable_tags = filterable_tags, additional_service_keys_to_tags = additional_service_keys_to_tags )
return ( url, '"{}" URL added successfully.'.format( match_name ) )
if url_caught:
raise HydrusExceptions.DataMissing( '"{}" URL was accepted but not added successfully--could not find/generate a new downloader page for it.'.format( match_name ) )
def _InitialiseMenubar( self ):
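# Builds the menubar in MENU_ORDER, creating async updaters for the menus whose dynamic entries get
# rebuilt later (database, file, network, pages, pending, services, undo).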
self._menubar = QW.QMenuBar( self )
self._menubar.setNativeMenuBar( False )
self._menu_updater_database = self._InitialiseMenubarGetMenuUpdaterDatabase()
self._menu_updater_file = self._InitialiseMenubarGetMenuUpdaterFile()
self._menu_updater_network = self._InitialiseMenubarGetMenuUpdaterNetwork()
self._menu_updater_pages = self._InitialiseMenubarGetMenuUpdaterPages()
self._menu_updater_pending = self._InitialiseMenubarGetMenuUpdaterPending()
self._menu_updater_services = self._InitialiseMenubarGetMenuUpdaterServices()
self._menu_updater_undo = self._InitialiseMenubarGetMenuUpdaterUndo()
self._boned_updater = self._InitialiseMenubarGetBonesUpdater()
self.setMenuBar( self._menubar )
for name in MENU_ORDER:
if name == 'database':
( menu, label ) = self._InitialiseMenuInfoDatabase()
self.ReplaceMenu( name, menu, label )
self._menu_updater_database.update()
elif name == 'file':
( menu, label ) = self._InitialiseMenuInfoFile()
self.ReplaceMenu( name, menu, label )
self._menu_updater_file.update()
elif name == 'help':
( menu, label ) = self._InitialiseMenuInfoHelp()
self.ReplaceMenu( name, menu, label )
elif name == 'network':
( menu, label ) = self._InitialiseMenuInfoNetwork()
self.ReplaceMenu( name, menu, label )
self._menu_updater_network.update()
elif name == 'pages':
( menu, label ) = self._InitialiseMenuInfoPages()
self.ReplaceMenu( name, menu, label )
self._menu_updater_pages.update()
elif name == 'pending':
self._pending_service_keys_to_submenus = {}
self._menubar_pending_submenu = QW.QMenu( self )
self.ReplaceMenu( name, self._menubar_pending_submenu, '&pending' )
self._menu_updater_pending.update()
elif name == 'services':
( menu, label ) = self._InitialiseMenuInfoServices()
self.ReplaceMenu( name, menu, label )
self._menu_updater_services.update()
elif name == 'tags':
( menu, label ) = self._InitialiseMenuInfoTags()
self.ReplaceMenu( name, menu, label )
elif name == 'undo':
( self._menubar_undo_submenu, label ) = self._InitialiseMenuInfoUndo()
self.ReplaceMenu( name, self._menubar_undo_submenu, label )
self._menu_updater_undo.update()
def _InitialiseMenubarGetBonesUpdater( self ):
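# The _InitialiseMenubarGet...Updater factories all follow the same AsyncQtUpdater pattern:
# loading_callable runs first (e.g. disabling submenus), work_callable does the read (presumably off
# the GUI thread), and publish_callable applies the result back to the menu widgets.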
def loading_callable():
pass
def work_callable():
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', 'Loading Statistics\u2026' )
HG.client_controller.pub( 'message', job_key )
boned_stats = HG.client_controller.Read( 'boned_stats' )
return ( job_key, boned_stats )
def publish_callable( result ):
( job_key, boned_stats ) = result
job_key.Delete()
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review your fate' )
panel = ClientGUIScrolledPanelsReview.ReviewHowBonedAmI( frame, boned_stats )
frame.SetPanel( panel )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
def _InitialiseMenubarGetMenuUpdaterDatabase( self ):
def loading_callable():
pass
def work_callable():
all_locations_are_default = HG.client_controller.client_files_manager.AllLocationsAreDefault()
return all_locations_are_default
def publish_callable( result ):
all_locations_are_default = result
backup_path = self._new_options.GetNoneableString( 'backup_path' )
self._menubar_database_set_up_backup_path.setVisible( all_locations_are_default and backup_path is None )
self._menubar_database_update_backup.setVisible( all_locations_are_default and backup_path is not None )
self._menubar_database_change_backup_path.setVisible( all_locations_are_default and backup_path is not None )
self._menubar_database_restore_backup.setVisible( all_locations_are_default )
self._menubar_database_multiple_location_label.setVisible( not all_locations_are_default )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
def _InitialiseMenubarGetMenuUpdaterFile( self ):
def loading_callable():
self._menubar_file_import_submenu.setEnabled( False )
self._menubar_file_export_submenu.setEnabled( False )
def work_callable():
import_folder_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER )
export_folder_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER )
return ( import_folder_names, export_folder_names )
def publish_callable( result ):
( import_folder_names, export_folder_names ) = result
self._menubar_file_import_submenu.setEnabled( True )
self._menubar_file_import_submenu.clear()
self._menubar_file_import_submenu.menuAction().setVisible( len( import_folder_names ) > 0 )
if len( import_folder_names ) > 0:
if len( import_folder_names ) > 1:
ClientGUIMenus.AppendMenuItem( self._menubar_file_import_submenu, 'check all', 'Check all import folders.', self._CheckImportFolder )
ClientGUIMenus.AppendSeparator( self._menubar_file_import_submenu )
for name in import_folder_names:
ClientGUIMenus.AppendMenuItem( self._menubar_file_import_submenu, name, 'Check this import folder now.', self._CheckImportFolder, name )
self._menubar_file_export_submenu.setEnabled( True )
self._menubar_file_export_submenu.clear()
self._menubar_file_export_submenu.menuAction().setVisible( len( export_folder_names ) > 0 )
if len( export_folder_names ) > 0:
if len( export_folder_names ) > 1:
ClientGUIMenus.AppendMenuItem( self._menubar_file_export_submenu, 'run all', 'Run all export folders.', self._RunExportFolder )
ClientGUIMenus.AppendSeparator( self._menubar_file_export_submenu )
for name in export_folder_names:
ClientGUIMenus.AppendMenuItem( self._menubar_file_export_submenu, name, 'Run this export folder now.', self._RunExportFolder, name )
simple_non_windows = not HC.PLATFORM_WINDOWS and not HG.client_controller.new_options.GetBoolean( 'advanced_mode' )
windows_or_advanced_non_windows = not simple_non_windows
self._menubar_file_minimise_to_system_tray.setVisible( ClientGUISystemTray.SystemTrayAvailable() and windows_or_advanced_non_windows )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
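# Builds the updater that syncs the network menu's pause checkboxes and shows the
# 'nudge subscriptions awake' item only in advanced mode.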
def _InitialiseMenubarGetMenuUpdaterNetwork( self ):
def loading_callable():
pass
def work_callable():
return 1
def publish_callable( result ):
advanced_mode = self._controller.new_options.GetBoolean( 'advanced_mode' )
self._menubar_network_nudge_subs.setVisible( advanced_mode )
self._menubar_network_all_traffic_paused.setChecked( HG.client_controller.new_options.GetBoolean( 'pause_all_new_network_traffic' ) )
self._menubar_network_subscriptions_paused.setChecked( HC.options[ 'pause_subs_sync' ] )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
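# Builds the updater for the pages menu: refreshes the page count and session weight labels,
# warns once per boot if the session weight gets very large, and repopulates the sessions,
# search and petition submenus from the current services and saved gui sessions.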
def _InitialiseMenubarGetMenuUpdaterPages( self ):
def loading_callable():
self._menubar_pages_sessions_submenu.setEnabled( False )
self._menubar_pages_search_submenu.setEnabled( False )
self._menubar_pages_petition_submenu.setEnabled( False )
def work_callable():
gui_session_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER )
if len( gui_session_names ) > 0:
gui_session_names_to_backup_timestamps = HG.client_controller.Read( 'serialisable_names_to_backup_timestamps', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER )
else:
gui_session_names_to_backup_timestamps = {}
return ( gui_session_names, gui_session_names_to_backup_timestamps )
def publish_callable( result ):
(
total_active_page_count,
total_active_num_hashes,
total_active_num_seeds,
total_closed_page_count,
total_closed_num_hashes,
total_closed_num_seeds
) = self.GetTotalPageCounts()
total_active_weight = ClientGUIPages.ConvertNumHashesAndSeedsToWeight( total_active_num_hashes, total_active_num_seeds )
if total_active_weight > 10000000 and self._controller.new_options.GetBoolean( 'show_session_size_warnings' ) and not self._have_shown_session_size_warning:
self._have_shown_session_size_warning = True
HydrusData.ShowText( 'Your session weight is {}, which is pretty big! To keep your UI lag-free, please try to close some pages or clear some finished downloaders!'.format( HydrusData.ToHumanInt( total_active_weight ) ) )
ClientGUIMenus.SetMenuItemLabel( self._menubar_pages_page_count, '{} pages open'.format( HydrusData.ToHumanInt( total_active_page_count ) ) )
ClientGUIMenus.SetMenuItemLabel( self._menubar_pages_session_weight, 'total session weight: {}'.format( HydrusData.ToHumanInt( total_active_weight ) ) )
#
( gui_session_names, gui_session_names_to_backup_timestamps ) = result
gui_session_names = sorted( gui_session_names )
self._menubar_pages_sessions_submenu.setEnabled( True )
self._menubar_pages_sessions_submenu.clear()
if len( gui_session_names ) > 0:
load = QW.QMenu( self._menubar_pages_sessions_submenu )
for name in gui_session_names:
ClientGUIMenus.AppendMenuItem( load, name, 'Close all other pages and load this session.', self._notebook.LoadGUISession, name )
ClientGUIMenus.AppendMenu( self._menubar_pages_sessions_submenu, load, 'clear and load' )
append = QW.QMenu( self._menubar_pages_sessions_submenu )
for name in gui_session_names:
ClientGUIMenus.AppendMenuItem( append, name, 'Append this session to whatever pages are already open.', self._notebook.AppendGUISessionFreshest, name )
ClientGUIMenus.AppendMenu( self._menubar_pages_sessions_submenu, append, 'append' )
if len( gui_session_names_to_backup_timestamps ) > 0:
append_backup = QW.QMenu( self._menubar_pages_sessions_submenu )
rows = sorted( gui_session_names_to_backup_timestamps.items() )
for ( name, timestamps ) in rows:
submenu = QW.QMenu( append_backup )
for timestamp in timestamps:
ClientGUIMenus.AppendMenuItem( submenu, HydrusData.ConvertTimestampToPrettyTime( timestamp ), 'Append this backup session to whatever pages are already open.', self._notebook.AppendGUISessionBackup, name, timestamp )
ClientGUIMenus.AppendMenu( append_backup, submenu, name )
ClientGUIMenus.AppendMenu( self._menubar_pages_sessions_submenu, append_backup, 'append session backup' )
save = QW.QMenu( self._menubar_pages_sessions_submenu )
for name in gui_session_names:
if name in ClientGUISession.RESERVED_SESSION_NAMES:
continue
ClientGUIMenus.AppendMenuItem( save, name, 'Save the existing open pages as a session.', self.ProposeSaveGUISession, name )
ClientGUIMenus.AppendMenuItem( save, 'as new session', 'Save the existing open pages as a session.', self.ProposeSaveGUISession )
ClientGUIMenus.AppendMenu( self._menubar_pages_sessions_submenu, save, 'save' )
if len( set( gui_session_names ).difference( ClientGUISession.RESERVED_SESSION_NAMES ) ) > 0:
delete = QW.QMenu( self._menubar_pages_sessions_submenu )
for name in gui_session_names:
if name in ClientGUISession.RESERVED_SESSION_NAMES:
continue
ClientGUIMenus.AppendMenuItem( delete, name, 'Delete this session.', self._DeleteGUISession, name )
ClientGUIMenus.AppendMenu( self._menubar_pages_sessions_submenu, delete, 'delete' )
#
self._menubar_pages_search_submenu.setEnabled( True )
self._menubar_pages_search_submenu.clear()
services = self._controller.services_manager.GetServices()
local_file_services = [ service for service in services if service.GetServiceType() == HC.LOCAL_FILE_DOMAIN and service.GetServiceKey() != CC.LOCAL_UPDATE_SERVICE_KEY ]
for service in local_file_services:
location_context = ClientLocation.LocationContext.STATICCreateSimple( service.GetServiceKey() )
ClientGUIMenus.AppendMenuItem( self._menubar_pages_search_submenu, service.GetName(), 'Open a new search tab.', self._notebook.NewPageQuery, location_context, on_deepest_notebook = True )
location_context = ClientLocation.LocationContext.STATICCreateSimple( CC.TRASH_SERVICE_KEY )
ClientGUIMenus.AppendMenuItem( self._menubar_pages_search_submenu, 'trash', 'Open a new search tab for your recently deleted files.', self._notebook.NewPageQuery, location_context, on_deepest_notebook = True )
repositories = [ service for service in services if service.GetServiceType() in HC.REPOSITORIES ]
file_repositories = [ service for service in repositories if service.GetServiceType() == HC.FILE_REPOSITORY ]
for service in file_repositories:
location_context = ClientLocation.LocationContext.STATICCreateSimple( service.GetServiceKey() )
ClientGUIMenus.AppendMenuItem( self._menubar_pages_search_submenu, service.GetName(), 'Open a new search tab for ' + service.GetName() + '.', self._notebook.NewPageQuery, location_context, on_deepest_notebook = True )
petition_permissions = [ ( content_type, HC.PERMISSION_ACTION_MODERATE ) for content_type in HC.SERVICE_TYPES_TO_CONTENT_TYPES ]
petition_resolvable_repositories = [ repository for repository in repositories if True in ( repository.HasPermission( content_type, action ) for ( content_type, action ) in petition_permissions ) ]
self._menubar_pages_petition_submenu.setEnabled( True )
self._menubar_pages_petition_submenu.clear()
self._menubar_pages_petition_submenu.menuAction().setVisible( len( petition_resolvable_repositories ) > 0 )
for service in petition_resolvable_repositories:
ClientGUIMenus.AppendMenuItem( self._menubar_pages_petition_submenu, service.GetName(), 'Open a new petition page for ' + service.GetName() + '.', self._notebook.NewPagePetitions, service.GetServiceKey(), on_deepest_notebook = True )
self._menubar_pages_download_popup_submenu.setEnabled( True )
has_ipfs = len( [ service for service in services if service.GetServiceType() == HC.IPFS ] ) > 0
self._menubar_pages_download_popup_submenu.menuAction().setVisible( has_ipfs )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
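# Builds the updater for the pending menu: creates one commit/forget submenu per service with
# pending content, keeps the submenus sorted by service name, and updates the per-service
# titles and the total pending count in the menu title.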
def _InitialiseMenubarGetMenuUpdaterPending( self ):
def loading_callable():
pass
def work_callable():
nums_pending = HG.client_controller.Read( 'nums_pending' )
return nums_pending
def publish_callable( result ):
nums_pending = result
total_num_pending = 0
for service_key in nums_pending.keys():
if service_key not in self._pending_service_keys_to_submenus:
service = self._controller.services_manager.GetService( service_key )
name = service.GetName()
submenu = QW.QMenu( self._menubar_pending_submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'commit', 'Upload {}\'s pending content.'.format( name ), self._UploadPending, service_key )
ClientGUIMenus.AppendMenuItem( submenu, 'forget', 'Clear {}\'s pending content.'.format( name ), self._DeletePending, service_key )
ClientGUIMenus.SetMenuTitle( submenu, name )
insert_before_action = None
for action in self._menubar_pending_submenu.actions():
if action.text() > name:
insert_before_action = action
break
if insert_before_action is None:
self._menubar_pending_submenu.addMenu( submenu )
else:
self._menubar_pending_submenu.insertMenu( insert_before_action, submenu )
self._pending_service_keys_to_submenus[ service_key ] = submenu
for ( service_key, submenu ) in self._pending_service_keys_to_submenus.items():
num_pending = 0
num_petitioned = 0
if service_key in nums_pending:
info = nums_pending[ service_key ]
service = self._controller.services_manager.GetService( service_key )
service_type = service.GetServiceType()
name = service.GetName()
if service_type == HC.TAG_REPOSITORY:
pending_phrase = 'tag data to upload'
petitioned_phrase = 'tag data to petition'
elif service_type == HC.FILE_REPOSITORY:
pending_phrase = 'files to upload'
petitioned_phrase = 'files to petition'
elif service_type == HC.IPFS:
pending_phrase = 'files to pin'
petitioned_phrase = 'files to unpin'
if service_type == HC.TAG_REPOSITORY:
num_pending = info[ HC.SERVICE_INFO_NUM_PENDING_MAPPINGS ] + info[ HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS ] + info[ HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS ]
num_petitioned = info[ HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS ] + info[ HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS ] + info[ HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS ]
elif service_type in ( HC.FILE_REPOSITORY, HC.IPFS ):
num_pending = info[ HC.SERVICE_INFO_NUM_PENDING_FILES ]
num_petitioned = info[ HC.SERVICE_INFO_NUM_PETITIONED_FILES ]
if num_pending + num_petitioned > 0:
if service_key in self._currently_uploading_pending:
title = '{}: currently uploading {}'.format( name, HydrusData.ToHumanInt( num_pending + num_petitioned ) )
else:
submessages = []
if num_pending > 0:
submessages.append( '{} {}'.format( HydrusData.ToHumanInt( num_pending ), pending_phrase ) )
if num_petitioned > 0:
submessages.append( '{} {}'.format( HydrusData.ToHumanInt( num_petitioned ), petitioned_phrase ) )
title = '{}: {}'.format( name, ', '.join( submessages ) )
submenu.setEnabled( service_key not in self._currently_uploading_pending )
ClientGUIMenus.SetMenuTitle( submenu, title )
submenu.menuAction().setVisible( num_pending + num_petitioned > 0 )
total_num_pending += num_pending + num_petitioned
ClientGUIMenus.SetMenuTitle( self._menubar_pending_submenu, 'pending ({})'.format( HydrusData.ToHumanInt( total_num_pending ) ) )
self._menubar_pending_submenu.menuAction().setVisible( total_num_pending > 0 )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
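# Builds the updater for the services menu's 'administrate services' submenu, listing admin
# actions only for services where the current account has the relevant permissions.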
def _InitialiseMenubarGetMenuUpdaterServices( self ):
def loading_callable():
self._menubar_services_admin_submenu.setEnabled( False )
def work_callable():
return 1
def publish_callable( result ):
self._menubar_services_admin_submenu.setEnabled( True )
self._menubar_services_admin_submenu.clear()
repository_admin_permissions = [ ( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_CREATE ), ( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE ), ( HC.CONTENT_TYPE_ACCOUNT_TYPES, HC.PERMISSION_ACTION_MODERATE ), ( HC.CONTENT_TYPE_OPTIONS, HC.PERMISSION_ACTION_MODERATE ) ]
repositories = self._controller.services_manager.GetServices( HC.REPOSITORIES )
admin_repositories = [ service for service in repositories if True in ( service.HasPermission( content_type, action ) for ( content_type, action ) in repository_admin_permissions ) ]
servers_admin = self._controller.services_manager.GetServices( ( HC.SERVER_ADMIN, ) )
server_admins = [ service for service in servers_admin if service.HasPermission( HC.CONTENT_TYPE_SERVICES, HC.PERMISSION_ACTION_MODERATE ) ]
admin_services = admin_repositories + server_admins
if len( admin_services ) > 0:
for service in admin_services:
submenu = QW.QMenu( self._menubar_services_admin_submenu )
service_key = service.GetServiceKey()
service_type = service.GetServiceType()
can_create_accounts = service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_CREATE )
can_overrule_accounts = service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE )
can_overrule_account_types = service.HasPermission( HC.CONTENT_TYPE_ACCOUNT_TYPES, HC.PERMISSION_ACTION_MODERATE )
can_overrule_services = service.HasPermission( HC.CONTENT_TYPE_SERVICES, HC.PERMISSION_ACTION_MODERATE )
can_overrule_options = service.HasPermission( HC.CONTENT_TYPE_OPTIONS, HC.PERMISSION_ACTION_MODERATE )
if can_overrule_accounts:
ClientGUIMenus.AppendMenuItem( submenu, 'review all accounts', 'See all accounts.', self._STARTReviewAllAccounts, service_key )
ClientGUIMenus.AppendMenuItem( submenu, 'modify an account', 'Modify a specific account\'s type and expiration.', self._ModifyAccount, service_key )
if can_overrule_accounts and service_type == HC.FILE_REPOSITORY:
ClientGUIMenus.AppendMenuItem( submenu, 'get an uploader\'s ip address', 'Fetch the ip address that uploaded a specific file, if the service knows it.', self._FetchIP, service_key )
if can_create_accounts:
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'create new accounts', 'Create new accounts for this service.', self._GenerateNewAccounts, service_key )
if can_overrule_account_types:
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage account types', 'Add, edit and delete account types for this service.', self._STARTManageAccountTypes, service_key )
if can_overrule_options and service_type in HC.REPOSITORIES:
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'change update period', 'Change the update period for this service.', self._ManageServiceOptionsUpdatePeriod, service_key )
ClientGUIMenus.AppendMenuItem( submenu, 'change anonymisation period', 'Change the account history nullification period for this service.', self._ManageServiceOptionsNullificationPeriod, service_key )
if can_overrule_services and service_type == HC.SERVER_ADMIN:
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage services', 'Add, edit, and delete this server\'s services.', self._ManageServer, service_key )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'backup server', 'Command the server to temporarily pause and back up its database.', self._BackupServer, service_key )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'vacuum server', 'Command the server to temporarily pause and vacuum its database.', self._VacuumServer, service_key )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'server/db lock: on', 'Command the server to lock itself and disconnect its db.', self._LockServer, service_key, True )
ClientGUIMenus.AppendMenuItem( submenu, 'server/db lock: test', 'See if the server is currently busy.', self._TestServerBusy, service_key )
ClientGUIMenus.AppendMenuItem( submenu, 'server/db lock: off', 'Command the server to unlock itself and resume its db.', self._LockServer, service_key, False )
ClientGUIMenus.AppendMenu( self._menubar_services_admin_submenu, submenu, service.GetName() )
self._menubar_services_admin_submenu.menuAction().setVisible( len( admin_services ) > 0 )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
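# Builds the updater for the undo menu: sets the undo/redo labels from the undo manager and
# repopulates the closed pages submenu, most recently closed first.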
def _InitialiseMenubarGetMenuUpdaterUndo( self ):
def loading_callable():
self._menubar_undo_closed_pages_submenu.setEnabled( False )
def work_callable():
return 1
def publish_callable( result ):
have_closed_pages = len( self._closed_pages ) > 0
undo_manager = self._controller.GetManager( 'undo' )
( undo_string, redo_string ) = undo_manager.GetUndoRedoStrings()
have_undo_stuff = undo_string is not None or redo_string is not None
if have_closed_pages or have_undo_stuff:
self._menubar_undo_undo.setVisible( undo_string is not None )
if undo_string is not None:
ClientGUIMenus.SetMenuItemLabel( self._menubar_undo_undo, undo_string )
self._menubar_undo_redo.setVisible( redo_string is not None )
if redo_string is not None:
ClientGUIMenus.SetMenuItemLabel( self._menubar_undo_redo, redo_string )
self._menubar_undo_closed_pages_submenu.setEnabled( True )
self._menubar_undo_closed_pages_submenu.clear()
self._menubar_undo_closed_pages_submenu.menuAction().setVisible( have_closed_pages )
if have_closed_pages:
ClientGUIMenus.AppendMenuItem( self._menubar_undo_closed_pages_submenu, 'clear all', 'Remove all closed pages from memory.', self.AskToDeleteAllClosedPages )
self._menubar_undo_closed_pages_submenu.addSeparator()
args = []
for ( i, ( time_closed, page ) ) in enumerate( self._closed_pages ):
name = page.GetName()
args.append( ( i, name + ' - ' + page.GetPrettyStatus() ) )
args.reverse() # so that recently closed are at the top
for ( index, name ) in args:
ClientGUIMenus.AppendMenuItem( self._menubar_undo_closed_pages_submenu, name, 'Restore this page.', self._UnclosePage, index )
self._menubar_undo_submenu.menuAction().setVisible( have_closed_pages or have_undo_stuff )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
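# Static construction of the 'database' menu: backup and migration, file and db maintenance,
# integrity checks, cache regeneration and file viewing statistics.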
def _InitialiseMenuInfoDatabase( self ):
menu = QW.QMenu( self )
ClientGUIMenus.AppendMenuItem( menu, 'set a password', 'Set a simple password for the database so only you can open it in the client.', self._SetPassword )
ClientGUIMenus.AppendSeparator( menu )
self._menubar_database_set_up_backup_path = ClientGUIMenus.AppendMenuItem( menu, 'set up a database backup location', 'Choose a path to back the database up to.', self._SetupBackupPath )
self._menubar_database_update_backup = ClientGUIMenus.AppendMenuItem( menu, 'update database backup', 'Back the database up to an external location.', self._BackupDatabase )
self._menubar_database_change_backup_path = ClientGUIMenus.AppendMenuItem( menu, 'change database backup location', 'Choose a path to back the database up to.', self._SetupBackupPath )
ClientGUIMenus.AppendSeparator( menu )
self._menubar_database_restore_backup = ClientGUIMenus.AppendMenuItem( menu, 'restore from a database backup', 'Restore the database from an external location.', self._controller.RestoreDatabase )
message = 'Your database is stored across multiple locations, which disables my internal backup routine. To back up, please use a third-party program that will work better than anything I can write.'
message += os.linesep * 2
message += 'Please check the help for more info on how best to backup manually.'
self._menubar_database_multiple_location_label = ClientGUIMenus.AppendMenuItem( menu, 'database is stored in multiple locations', 'The database is migrated.', HydrusData.ShowText, message )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'migrate database', 'Review and manage the locations your database is stored.', self._MigrateDatabase )
ClientGUIMenus.AppendSeparator( menu )
file_maintenance_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( file_maintenance_menu, 'manage scheduled jobs', 'Review outstanding jobs, and schedule new ones.', self._ReviewFileMaintenance )
ClientGUIMenus.AppendSeparator( file_maintenance_menu )
check_manager = ClientGUICommon.CheckboxManagerOptions( 'file_maintenance_during_idle' )
current_value = check_manager.GetCurrentValue()
func = check_manager.Invert
ClientGUIMenus.AppendMenuCheckItem( file_maintenance_menu, 'work file jobs during idle time', 'Control whether file maintenance can work during idle time.', current_value, func )
check_manager = ClientGUICommon.CheckboxManagerOptions( 'file_maintenance_during_active' )
current_value = check_manager.GetCurrentValue()
func = check_manager.Invert
ClientGUIMenus.AppendMenuCheckItem( file_maintenance_menu, 'work file jobs during normal time', 'Control whether file maintenance can work during normal time.', current_value, func )
ClientGUIMenus.AppendMenu( menu, file_maintenance_menu, 'file maintenance' )
maintenance_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'analyze', 'Optimise slow queries by running statistical analyses on the database.', self._AnalyzeDatabase )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'review vacuum data', 'See whether it is worth rebuilding the database to reformat tables and recover disk space.', self._ReviewVacuumData )
ClientGUIMenus.AppendSeparator( maintenance_submenu )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'clear orphan files', 'Clear out surplus files that have found their way into the file structure.', self._ClearOrphanFiles )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'clear orphan file records', 'Clear out surplus file records that have not been deleted correctly.', self._ClearOrphanFileRecords )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'clear orphan tables', 'Clear out surplus db tables that have not been deleted correctly.', self._ClearOrphanTables )
ClientGUIMenus.AppendMenuItem( maintenance_submenu, 'clear orphan hashed serialisables', 'Clear non-needed cached hashed serialisable objects.', self._ClearOrphanHashedSerialisables )
ClientGUIMenus.AppendMenu( menu, maintenance_submenu, 'db maintenance' )
check_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( check_submenu, 'database integrity', 'Have the database examine all its records for internal consistency.', self._CheckDBIntegrity )
ClientGUIMenus.AppendMenuItem( check_submenu, 'repopulate truncated mappings tables', 'Use the mappings cache to try to repair a previously damaged mappings file.', self._RepopulateMappingsTables )
ClientGUIMenus.AppendMenuItem( check_submenu, 'fix logically inconsistent mappings', 'Remove tags that are occupying two mutually exclusive states.', self._FixLogicallyInconsistentMappings )
ClientGUIMenus.AppendMenuItem( check_submenu, 'fix invalid tags', 'Scan the database for invalid tags.', self._RepairInvalidTags )
ClientGUIMenus.AppendMenu( menu, check_submenu, 'check and repair' )
regen_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'total pending count, in the pending menu', 'Regenerate the pending count up top.', self._DeleteServiceInfo, only_pending = True )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag storage mappings cache (all, with deferred siblings & parents calculation)', 'Delete and recreate the tag mappings cache, fixing bad tags or miscounts.', self._RegenerateTagMappingsCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag storage mappings cache (just pending tags, instant calculation)', 'Delete and recreate the tag pending mappings cache, fixing bad tags or miscounts.', self._RegenerateTagPendingMappingsCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag display mappings cache (all, deferred siblings & parents calculation)', 'Delete and recreate the tag display mappings cache, fixing bad tags or miscounts.', self._RegenerateTagDisplayMappingsCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag display mappings cache (just pending tags, instant calculation)', 'Delete and recreate the tag display pending mappings cache, fixing bad tags or miscounts.', self._RegenerateTagDisplayPendingMappingsCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag display mappings cache (missing file repopulation)', 'Repopulate the mappings cache if you know it is lacking files, fixing bad tags or miscounts.', self._RepopulateTagDisplayMappingsCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag siblings lookup cache', 'Delete and recreate the tag siblings cache.', self._RegenerateTagSiblingsLookupCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag parents lookup cache', 'Delete and recreate the tag parents cache.', self._RegenerateTagParentsLookupCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag text search cache', 'Delete and regenerate the cache hydrus uses for fast tag search.', self._RegenerateTagCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag text search cache (subtags repopulation)', 'Repopulate the subtags for the cache hydrus uses for fast tag search.', self._RepopulateTagCacheMissingSubtags )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'tag text search cache (searchable subtag maps)', 'Regenerate the searchable subtag maps.', self._RegenerateTagCacheSearchableSubtagsMaps )
ClientGUIMenus.AppendSeparator( regen_submenu )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'local hash cache', 'Repopulate the cache hydrus uses for fast hash lookup for local files.', self._RegenerateLocalHashCache )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'local tag cache', 'Repopulate the cache hydrus uses for fast tag lookup for local files.', self._RegenerateLocalTagCache )
ClientGUIMenus.AppendSeparator( regen_submenu )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'clear service info cache', 'Delete all cached service info like total number of mappings or files, in case it has become desynchronised. Some parts of the gui may be laggy immediately after this as these numbers are recalculated.', self._DeleteServiceInfo )
ClientGUIMenus.AppendMenuItem( regen_submenu, 'similar files search tree', 'Delete and recreate the similar files search tree.', self._RegenerateSimilarFilesTree )
ClientGUIMenus.AppendMenu( menu, regen_submenu, 'regenerate' )
ClientGUIMenus.AppendSeparator( menu )
file_viewing_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( file_viewing_submenu, 'clear all file viewing statistics', 'Delete all file viewing records from the database.', self._ClearFileViewingStats )
ClientGUIMenus.AppendMenuItem( file_viewing_submenu, 'cull file viewing statistics based on current min/max values', 'Cull your file viewing statistics based on minimum and maximum permitted time deltas.', self._CullFileViewingStats )
ClientGUIMenus.AppendMenu( menu, file_viewing_submenu, 'file viewing statistics' )
return ( menu, '&database' )
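# Static construction of the 'file' menu: imports, import/export folders, open locations,
# options, shortcuts, system tray minimise and the exit actions.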
def _InitialiseMenuInfoFile( self ):
menu = QW.QMenu( self )
ClientGUIMenus.AppendMenuItem( menu, 'import files', 'Add new files to the database.', self._ImportFiles )
ClientGUIMenus.AppendSeparator( menu )
#
i_and_e_submenu = QW.QMenu( menu )
submenu = QW.QMenu( i_and_e_submenu )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'import folders', 'Pause the client\'s import folders.', HC.options['pause_import_folders_sync'], self._PausePlaySync, 'import_folders' )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'export folders', 'Pause the client\'s export folders.', HC.options['pause_export_folders_sync'], self._PausePlaySync, 'export_folders' )
ClientGUIMenus.AppendMenu( i_and_e_submenu, submenu, 'pause' )
ClientGUIMenus.AppendSeparator( i_and_e_submenu )
self._menubar_file_import_submenu = QW.QMenu( i_and_e_submenu )
ClientGUIMenus.AppendMenu( i_and_e_submenu, self._menubar_file_import_submenu, 'check import folder now' )
self._menubar_file_export_submenu = QW.QMenu( i_and_e_submenu )
ClientGUIMenus.AppendMenu( i_and_e_submenu, self._menubar_file_export_submenu, 'run export folder now' )
ClientGUIMenus.AppendSeparator( i_and_e_submenu )
ClientGUIMenus.AppendMenuItem( i_and_e_submenu, 'manage import folders', 'Manage folders from which the client can automatically import.', self._ManageImportFolders )
ClientGUIMenus.AppendMenuItem( i_and_e_submenu, 'manage export folders', 'Manage folders to which the client can automatically export.', self._ManageExportFolders )
ClientGUIMenus.AppendMenu( menu, i_and_e_submenu, 'import and export folders' )
#
ClientGUIMenus.AppendSeparator( menu )
open = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( open, 'installation directory', 'Open the installation directory for this client.', self._OpenInstallFolder )
ClientGUIMenus.AppendMenuItem( open, 'database directory', 'Open the database directory for this instance of the client.', self._OpenDBFolder )
ClientGUIMenus.AppendMenuItem( open, 'quick export directory', 'Open the export directory so you can easily access the files you have exported.', self._OpenExportFolder )
ClientGUIMenus.AppendMenu( menu, open, 'open' )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'options', 'Change how the client operates.', self._ManageOptions )
ClientGUIMenus.AppendMenuItem( menu, 'shortcuts', 'Edit the shortcuts your client responds to.', ClientGUIShortcutControls.ManageShortcuts, self )
ClientGUIMenus.AppendSeparator( menu )
label = 'minimise to system tray'
if not HC.PLATFORM_WINDOWS:
label += ' (may be buggy/crashy!)'
self._menubar_file_minimise_to_system_tray = ClientGUIMenus.AppendMenuItem( menu, label, 'Hide the client to an icon on your system tray.', self._FlipShowHideWholeUI )
ClientGUIMenus.AppendSeparator( menu )
we_borked_linux_pyinstaller = HC.PLATFORM_LINUX and not HC.RUNNING_FROM_SOURCE
if not we_borked_linux_pyinstaller:
ClientGUIMenus.AppendMenuItem( menu, 'restart', 'Shut the client down and then start it up again.', self.TryToExit, restart = True )
ClientGUIMenus.AppendMenuItem( menu, 'exit and force shutdown maintenance', 'Shut the client down and force any outstanding shutdown maintenance to run.', self.TryToExit, force_shutdown_maintenance = True )
ClientGUIMenus.AppendMenuItem( menu, 'exit', 'Shut the client down.', self.TryToExit )
return ( menu, '&file' )
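# Static construction of the 'help' menu: local help and changelog, external links, darkmode
# and advanced mode toggles, and the debug submenus (modes, profiling, report modes,
# gui/data/memory/network actions and tests).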
def _InitialiseMenuInfoHelp( self ):
menu = QW.QMenu( self )
ClientGUIMenus.AppendMenuItem( menu, 'help and getting started guide', 'Open hydrus\'s local help in your web browser.', ClientPaths.LaunchPathInWebBrowser, os.path.join( HC.HELP_DIR, 'index.html' ) )
links = QW.QMenu( menu )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'site', 'Open hydrus\'s website, which is mostly a mirror of the local help.', CC.global_pixmaps().file_repository, ClientPaths.LaunchURLInWebBrowser, 'https://hydrusnetwork.github.io/hydrus/' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'github repository', 'Open the hydrus github repository.', CC.global_pixmaps().github, ClientPaths.LaunchURLInWebBrowser, 'https://github.com/hydrusnetwork/hydrus' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'latest build', 'Open the latest build on the hydrus github repository.', CC.global_pixmaps().github, ClientPaths.LaunchURLInWebBrowser, 'https://github.com/hydrusnetwork/hydrus/releases/latest' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'issue tracker', 'Open the github issue tracker, which is run by users.', CC.global_pixmaps().github, ClientPaths.LaunchURLInWebBrowser, 'https://github.com/hydrusnetwork/hydrus/issues' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, '8chan.moe /t/ (Hydrus Network General)', 'Open the 8chan.moe /t/ board, where a Hydrus Network General should exist with release posts and other status updates.', CC.global_pixmaps().eight_chan, ClientPaths.LaunchURLInWebBrowser, 'https://8chan.moe/t/catalog.html' )
site = ClientGUIMenus.AppendMenuItem( links, 'Endchan board bunker', 'Open hydrus dev\'s Endchan board, the bunker for the case when 8chan.moe is unavailable. Try .org if .net is unavailable.', ClientPaths.LaunchURLInWebBrowser, 'https://endchan.net/hydrus/index.html' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'twitter', 'Open hydrus dev\'s twitter, where he makes general progress updates and emergency notifications.', CC.global_pixmaps().twitter, ClientPaths.LaunchURLInWebBrowser, 'https://twitter.com/hydrusnetwork' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'tumblr', 'Open hydrus dev\'s tumblr, where he makes release posts and other status updates.', CC.global_pixmaps().tumblr, ClientPaths.LaunchURLInWebBrowser, 'https://hydrus.tumblr.com/' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'discord', 'Open a discord channel where many hydrus users congregate. Hydrus dev visits regularly.', CC.global_pixmaps().discord, ClientPaths.LaunchURLInWebBrowser, 'https://discord.gg/wPHPCUZ' )
site = ClientGUIMenus.AppendMenuBitmapItem( links, 'patreon', 'Open hydrus dev\'s patreon, which lets you support development.', CC.global_pixmaps().patreon, ClientPaths.LaunchURLInWebBrowser, 'https://www.patreon.com/hydrus_dev' )
ClientGUIMenus.AppendMenu( menu, links, 'links' )
ClientGUIMenus.AppendMenuItem( menu, 'changelog', 'Open hydrus\'s local changelog in your web browser.', ClientPaths.LaunchPathInWebBrowser, os.path.join( HC.HELP_DIR, 'changelog.html' ) )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'add the public tag repository', 'This will add the public tag repository to your client.', self._AutoRepoSetup )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'how boned am I?', 'Check for a summary of your ride so far.', self._HowBonedAmI )
ClientGUIMenus.AppendSeparator( menu )
currently_darkmode = self._new_options.GetString( 'current_colourset' ) == 'darkmode'
ClientGUIMenus.AppendMenuCheckItem( menu, 'darkmode', 'Set the \'darkmode\' colourset on and off.', currently_darkmode, self.FlipDarkmode )
check_manager = ClientGUICommon.CheckboxManagerOptions( 'advanced_mode' )
current_value = check_manager.GetCurrentValue()
func = check_manager.Invert
ClientGUIMenus.AppendMenuCheckItem( menu, 'advanced mode', 'Turn on advanced menu options and buttons.', current_value, func )
ClientGUIMenus.AppendSeparator( menu )
debug = QW.QMenu( menu )
debug_modes = QW.QMenu( debug )
ClientGUIMenus.AppendMenuCheckItem( debug_modes, 'force idle mode', 'Make the client consider itself idle and fire all maintenance routines right now. This may hang the gui for a while.', HG.force_idle_mode, self._SwitchBoolean, 'force_idle_mode' )
ClientGUIMenus.AppendMenuCheckItem( debug_modes, 'no page limit mode', 'Let the user create as many pages as they want with no warnings or prohibitions.', HG.no_page_limit_mode, self._SwitchBoolean, 'no_page_limit_mode' )
ClientGUIMenus.AppendMenuCheckItem( debug_modes, 'thumbnail debug mode', 'Show some thumbnail debug info.', HG.thumbnail_debug_mode, self._SwitchBoolean, 'thumbnail_debug_mode' )
ClientGUIMenus.AppendMenuItem( debug_modes, 'simulate a wake from sleep', 'Tell the controller to pretend that it just woke up from sleep.', self._controller.SimulateWakeFromSleepEvent )
ClientGUIMenus.AppendMenu( debug, debug_modes, 'debug modes' )
profiling = QW.QMenu( debug )
profile_mode_message = 'If something is running slow, you can turn on profile mode to have hydrus gather information on how long many jobs take to run.'
profile_mode_message += os.linesep * 2
profile_mode_message += 'Turn the mode on, do the slow thing for a bit, and then turn it off. In your database directory will be a new profile log, which is really helpful for hydrus dev to figure out what is running slow for you and how to fix it.'
profile_mode_message += os.linesep * 2
profile_mode_message += 'A new Query Planner mode also makes very detailed database analysis. This is an alternate profiling mode hydev is testing.'
profile_mode_message += os.linesep * 2
profile_mode_message += 'More information is available in the help, under \'reducing program lag\'.'
ClientGUIMenus.AppendMenuItem( profiling, 'what is this?', 'Show profile info.', QW.QMessageBox.information, self, 'Profile modes', profile_mode_message )
ClientGUIMenus.AppendMenuCheckItem( profiling, 'profile mode', 'Run detailed \'profiles\'.', HG.profile_mode, HG.client_controller.FlipProfileMode )
ClientGUIMenus.AppendMenuCheckItem( profiling, 'query planner mode', 'Run detailed \'query plans\'.', HG.query_planner_mode, HG.client_controller.FlipQueryPlannerMode )
ClientGUIMenus.AppendMenu( debug, profiling, 'profiling' )
report_modes = QW.QMenu( debug )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'cache report mode', 'Have the image and thumb caches report their operation.', HG.cache_report_mode, self._SwitchBoolean, 'cache_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'callto report mode', 'Report whenever the thread pool is given a task.', HG.callto_report_mode, self._SwitchBoolean, 'callto_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'canvas tile borders mode', 'Draw tile borders.', HG.canvas_tile_outline_mode, self._SwitchBoolean, 'canvas_tile_outline_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'daemon report mode', 'Have the daemons report whenever they fire their jobs.', HG.daemon_report_mode, self._SwitchBoolean, 'daemon_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'db report mode', 'Have the db report query information, where supported.', HG.db_report_mode, self._SwitchBoolean, 'db_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'file import report mode', 'Have the db and file manager report file import progress.', HG.file_import_report_mode, self._SwitchBoolean, 'file_import_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'file report mode', 'Have the file manager report file request information, where supported.', HG.file_report_mode, self._SwitchBoolean, 'file_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'gui report mode', 'Have the gui report inside information, where supported.', HG.gui_report_mode, self._SwitchBoolean, 'gui_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'hover window report mode', 'Have the hover windows report their show/hide logic.', HG.hover_window_report_mode, self._SwitchBoolean, 'hover_window_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'media load report mode', 'Have the client report media load information, where supported.', HG.media_load_report_mode, self._SwitchBoolean, 'media_load_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'mpv report mode', 'Have the client report significant mpv debug information.', HG.mpv_report_mode, self._SwitchBoolean, 'mpv_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'network report mode', 'Have the network engine report new jobs.', HG.network_report_mode, self._SwitchBoolean, 'network_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'pubsub report mode', 'Report info about every pubsub processed.', HG.pubsub_report_mode, self._SwitchBoolean, 'pubsub_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'similar files metadata generation report mode', 'Have the perceptual_hash generation routine report its progress.', HG.phash_generation_report_mode, self._SwitchBoolean, 'phash_generation_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'shortcut report mode', 'Have the new shortcut system report what shortcuts it catches and whether it matches an action.', HG.shortcut_report_mode, self._SwitchBoolean, 'shortcut_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'subprocess report mode', 'Report whenever an external process is called.', HG.subprocess_report_mode, self._SwitchBoolean, 'subprocess_report_mode' )
ClientGUIMenus.AppendMenuCheckItem( report_modes, 'subscription report mode', 'Have the subscription system report what it is doing.', HG.subscription_report_mode, self._SwitchBoolean, 'subscription_report_mode' )
ClientGUIMenus.AppendMenu( debug, report_modes, 'report modes' )
gui_actions = QW.QMenu( debug )
default_location_context = HG.client_controller.services_manager.GetDefaultLocationContext()
def flip_macos_antiflicker():
HG.macos_antiflicker_test = not HG.macos_antiflicker_test
if HG.macos_antiflicker_test:
HydrusData.ShowText( 'Hey, the macOS safety code is now disabled. Please open a new media viewer and see if a mix of video and images show ok, no 100% CPU problems.' )
if HC.PLATFORM_MACOS:
ClientGUIMenus.AppendMenuItem( gui_actions, 'macos anti-flicker test', 'Try it out, let me know how it goes.', flip_macos_antiflicker )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make some popups', 'Throw some varied popups at the message manager, just to check it is working.', self._DebugMakeSomePopups )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a long text popup', 'Make a popup with text that will grow in size.', self._DebugLongTextPopup )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a popup in five seconds', 'Throw a delayed popup at the message manager, giving you time to minimise or otherwise alter the client before it arrives.', self._controller.CallLater, 5, HydrusData.ShowText, 'This is a delayed popup message.' )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a modal popup in five seconds', 'Throw up a delayed modal popup to test with. It will stay alive for five seconds.', self._DebugMakeDelayedModalPopup, True )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a non-cancellable modal popup in five seconds', 'Throw up a delayed modal popup to test with. It will stay alive for five seconds.', self._DebugMakeDelayedModalPopup, False )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a new page in five seconds', 'Throw a delayed page at the main notebook, giving you time to minimise or otherwise alter the client before it arrives.', self._controller.CallLater, 5, self._controller.pub, 'new_page_query', default_location_context )
ClientGUIMenus.AppendMenuItem( gui_actions, 'refresh pages menu in five seconds', 'Delayed refresh the pages menu, giving you time to minimise or otherwise alter the client before it arrives.', self._controller.CallLater, 5, self._menu_updater_pages.update )
ClientGUIMenus.AppendMenuItem( gui_actions, 'publish some sub files in five seconds', 'Publish some files like a subscription would.', self._controller.CallLater, 5, lambda: HG.client_controller.pub( 'imported_files_to_page', [ HydrusData.GenerateKey() for i in range( 5 ) ], 'example sub files' ) )
ClientGUIMenus.AppendMenuItem( gui_actions, 'make a parentless text ctrl dialog', 'Make a parentless text control in a dialog to test some character event catching.', self._DebugMakeParentlessTextCtrl )
ClientGUIMenus.AppendMenuItem( gui_actions, 'reset multi-column list settings to default', 'Reset all multi-column list widths and other display settings to default.', self._DebugResetColumnListManager )
ClientGUIMenus.AppendMenuItem( gui_actions, 'force a main gui layout now', 'Tell the gui to relayout--useful to test some gui bootup layout issues.', self.adjustSize )
ClientGUIMenus.AppendMenuItem( gui_actions, 'save \'last session\' gui session', 'Make an immediate save of the \'last session\' gui session. Mostly for testing crashes, where last session is not saved correctly.', self.ProposeSaveGUISession, CC.LAST_SESSION_SESSION_NAME )
ClientGUIMenus.AppendMenu( debug, gui_actions, 'gui actions' )
data_actions = QW.QMenu( debug )
ClientGUIMenus.AppendMenuCheckItem( data_actions, 'db ui-hang relief mode', 'Have UI-synchronised database jobs process pending Qt events while they wait.', HG.db_ui_hang_relief_mode, self._SwitchBoolean, 'db_ui_hang_relief_mode' )
ClientGUIMenus.AppendMenuItem( data_actions, 'review threads', 'Show current threads and what they are doing.', self._ReviewThreads )
ClientGUIMenus.AppendMenuItem( data_actions, 'show scheduled jobs', 'Print some information about the currently scheduled jobs log.', self._DebugShowScheduledJobs )
ClientGUIMenus.AppendMenuItem( data_actions, 'subscription manager snapshot', 'Have the subscription system show what it is doing.', self._controller.subscriptions_manager.ShowSnapshot )
ClientGUIMenus.AppendMenuItem( data_actions, 'flush log', 'Command the log to write any buffered contents to hard drive.', HydrusData.DebugPrint, 'Flushing log' )
ClientGUIMenus.AppendMenuItem( data_actions, 'enable truncated image loading', 'Enable the truncated image loading to test out broken jpegs.', self._EnableLoadTruncatedImages )
ClientGUIMenus.AppendSeparator( data_actions )
ClientGUIMenus.AppendMenuItem( data_actions, 'simulate program quit signal', 'Kill the program via a QApplication quit.', QW.QApplication.instance().quit )
ClientGUIMenus.AppendMenu( debug, data_actions, 'data actions' )
memory_actions = QW.QMenu( debug )
ClientGUIMenus.AppendMenuItem( memory_actions, 'run fast memory maintenance', 'Tell all the fast caches to maintain themselves.', self._controller.MaintainMemoryFast )
ClientGUIMenus.AppendMenuItem( memory_actions, 'run slow memory maintenance', 'Tell all the slow caches to maintain themselves.', self._controller.MaintainMemorySlow )
ClientGUIMenus.AppendMenuItem( memory_actions, 'clear image rendering cache', 'Tell the image rendering system to forget all current images. This will often free up a bunch of memory immediately.', self._controller.ClearCaches )
ClientGUIMenus.AppendMenuItem( memory_actions, 'clear thumbnail cache', 'Tell the thumbnail cache to forget everything and redraw all current thumbs.', self._controller.pub, 'reset_thumbnail_cache' )
ClientGUIMenus.AppendMenuItem( memory_actions, 'print garbage', 'Print some information about the python garbage to the log.', self._DebugPrintGarbage )
ClientGUIMenus.AppendMenuItem( memory_actions, 'take garbage snapshot', 'Capture current garbage object counts.', self._DebugTakeGarbageSnapshot )
ClientGUIMenus.AppendMenuItem( memory_actions, 'show garbage snapshot changes', 'Show object count differences from the last snapshot.', self._DebugShowGarbageDifferences )
ClientGUIMenus.AppendMenu( debug, memory_actions, 'memory actions' )
network_actions = QW.QMenu( debug )
ClientGUIMenus.AppendMenuItem( network_actions, 'fetch a url', 'Fetch a URL using the network engine as per normal.', self._DebugFetchAURL )
ClientGUIMenus.AppendMenu( debug, network_actions, 'network actions' )
tests = QW.QMenu( debug )
ClientGUIMenus.AppendMenuItem( tests, 'run the ui test', 'Run hydrus_dev\'s weekly UI Test. Guaranteed to work and not mess up your session, ha ha.', self._RunUITest )
ClientGUIMenus.AppendMenuItem( tests, 'run the client api test', 'Run hydrus_dev\'s weekly Client API Test. Guaranteed to work and not mess up your session, ha ha.', self._RunClientAPITest )
ClientGUIMenus.AppendMenuItem( tests, 'run the server test', 'This will try to boot the server in your install folder and initialise it. This is mostly here for testing purposes.', self._RunServerTest )
ClientGUIMenus.AppendMenu( debug, tests, 'tests, do not touch' )
ClientGUIMenus.AppendMenu( menu, debug, 'debug' )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'about Qt', 'See information about the Qt framework.', QW.QMessageBox.aboutQt, self )
ClientGUIMenus.AppendMenuItem( menu, 'about', 'See this client\'s version and other information.', self._AboutWindow )
return ( menu, '&help' )
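# Static construction of the 'network' menu: traffic and subscription pause toggles,
# subscription management, bandwidth/session/header review, downloader components and logins.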
def _InitialiseMenuInfoNetwork( self ):
menu = QW.QMenu( self )
submenu = QW.QMenu( menu )
pause_all_new_network_traffic = self._controller.new_options.GetBoolean( 'pause_all_new_network_traffic' )
self._menubar_network_all_traffic_paused = ClientGUIMenus.AppendMenuCheckItem( submenu, 'all new network traffic', 'Stop any new network jobs from sending data.', pause_all_new_network_traffic, self.FlipNetworkTrafficPaused )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'always boot the client with paused network traffic', 'Always start the program with network traffic paused.', self._controller.new_options.GetBoolean( 'boot_with_network_traffic_paused' ), self._controller.new_options.FlipBoolean, 'boot_with_network_traffic_paused' )
ClientGUIMenus.AppendSeparator( submenu )
self._menubar_network_subscriptions_paused = ClientGUIMenus.AppendMenuCheckItem( submenu, 'subscriptions', 'Pause the client\'s synchronisation with website subscriptions.', HC.options[ 'pause_subs_sync' ], self.FlipSubscriptionsPaused )
self._menubar_network_nudge_subs = ClientGUIMenus.AppendMenuItem( submenu, 'nudge subscriptions awake', 'Tell the subs daemon to wake up, just in case any subs are due.', self._controller.subscriptions_manager.Wake )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'paged file import queues', 'Pause all file import queues.', self._controller.new_options.GetBoolean( 'pause_all_file_queues' ), self._controller.new_options.FlipBoolean, 'pause_all_file_queues' )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'gallery searches', 'Pause all gallery imports\' searching.', self._controller.new_options.GetBoolean( 'pause_all_gallery_searches' ), self._controller.new_options.FlipBoolean, 'pause_all_gallery_searches' )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'watcher checkers', 'Pause all watchers\' checking.', self._controller.new_options.GetBoolean( 'pause_all_watcher_checkers' ), self._controller.new_options.FlipBoolean, 'pause_all_watcher_checkers' )
ClientGUIMenus.AppendMenu( menu, submenu, 'pause' )
#
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'manage subscriptions', 'Change the queries you want the client to regularly import from.', self._ManageSubscriptions )
ClientGUIMenus.AppendSeparator( menu )
submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( submenu, 'review bandwidth usage and edit rules', 'See where you are consuming data.', self._ReviewBandwidth )
ClientGUIMenus.AppendMenuItem( submenu, 'review current network jobs', 'Review the jobs currently running in the network engine.', self._ReviewNetworkJobs )
ClientGUIMenus.AppendMenuItem( submenu, 'review session cookies', 'Review and edit which cookies you have for which network contexts.', self._ReviewNetworkSessions )
ClientGUIMenus.AppendMenuItem( submenu, 'manage http headers', 'Configure how the client talks to the network.', self._ManageNetworkHeaders )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage upnp', 'If your router supports it, see and edit your current UPnP NAT traversal mappings.', self._ManageUPnP )
ClientGUIMenus.AppendMenu( menu, submenu, 'data' )
#
submenu = QW.QMenu( menu )
if not ClientParsing.HTML5LIB_IS_OK:
message = 'The client was unable to import html5lib on boot. This is an important parsing library that performs better than the usual backup, lxml. Without it, some downloaders will not work well and you will miss tags and files.'
message += os.linesep * 2
message += 'You are likely running from source, so I recommend you close the client, run \'pip install html5lib\' (or whatever is appropriate for your environment) and try again. You can double-check what imported ok under help->about.'
ClientGUIMenus.AppendMenuItem( submenu, '*** html5lib not found! ***', 'Your client does not have an important library.', QW.QMessageBox.warning, self, 'Warning', message )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'import downloaders', 'Import new download capability through encoded pngs from other users.', self._ImportDownloaders )
ClientGUIMenus.AppendMenuItem( submenu, 'export downloaders', 'Export downloader components to easy-import pngs.', self._ExportDownloader )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage default tag import options', 'Change the default tag import options for each of your linked url matches.', self._ManageDefaultTagImportOptions )
ClientGUIMenus.AppendMenuItem( submenu, 'manage downloader and url display', 'Configure how downloader objects present across the client.', self._ManageDownloaderDisplay )
ClientGUIMenus.AppendSeparator( submenu )
clipboard_menu = QW.QMenu( submenu )
ClientGUIMenus.AppendMenuCheckItem( clipboard_menu, 'watcher urls', 'Automatically import watcher URLs that enter the clipboard just as if you drag-and-dropped them onto the ui.', self._controller.new_options.GetBoolean( 'watch_clipboard_for_watcher_urls' ), self._FlipClipboardWatcher, 'watch_clipboard_for_watcher_urls' )
ClientGUIMenus.AppendMenuCheckItem( clipboard_menu, 'other recognised urls', 'Automatically import recognised URLs that enter the clipboard just as if you drag-and-dropped them onto the ui.', self._controller.new_options.GetBoolean( 'watch_clipboard_for_other_recognised_urls' ), self._FlipClipboardWatcher, 'watch_clipboard_for_other_recognised_urls' )
ClientGUIMenus.AppendMenu( submenu, clipboard_menu, 'watch clipboard for urls' )
ClientGUIMenus.AppendMenu( menu, submenu, 'downloaders' )
#
submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage url class links', 'Configure how URLs present across the client.', self._ManageURLClassLinks )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage gallery url generators', 'Manage the client\'s GUGs, which convert search terms into URLs.', self._ManageGUGs )
ClientGUIMenus.AppendMenuItem( submenu, 'manage url classes', 'Configure which URLs the client can recognise.', self._ManageURLClasses )
ClientGUIMenus.AppendMenuItem( submenu, 'manage parsers', 'Manage the client\'s parsers, which convert URL content into hydrus metadata.', self._ManageParsers )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'SEMI-LEGACY: manage file lookup scripts', 'Manage how the client parses different types of web content.', self._ManageParsingScripts )
ClientGUIMenus.AppendMenu( menu, submenu, 'downloader components' )
#
submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage logins', 'Edit which domains you wish to log in to.', self._ManageLogins )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'manage login scripts', 'Manage the client\'s login scripts, which define how to log in to different sites.', self._ManageLoginScripts )
ClientGUIMenus.AppendSeparator( submenu )
ClientGUIMenus.AppendMenuItem( submenu, 'DEBUG: do tumblr GDPR click-through', 'Do a manual click-through for the tumblr GDPR page.', self._controller.CallLater, 0.0, self._controller.network_engine.login_manager.LoginTumblrGDPR )
ClientGUIMenus.AppendMenu( menu, submenu, 'logins' )
#
return ( menu, '&network' )
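# Static construction of the 'pages' menu: session info labels, splitter controls, and the
# sessions/search/petition/download submenus whose contents are filled in later by the pages
# menu updater.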
def _InitialiseMenuInfoPages( self ):
menu = QW.QMenu( self )
self._menubar_pages_page_count = ClientGUIMenus.AppendMenuLabel( menu, 'initialising', 'You have this many pages open.' )
self._menubar_pages_session_weight = ClientGUIMenus.AppendMenuItem( menu, 'initialising', 'Your session is this heavy.', self._ShowPageWeightInfo )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'refresh', 'If the current page has a search, refresh it.', self._Refresh )
splitter_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( splitter_menu, 'show/hide', 'Show or hide the panels on the left.', self._ShowHideSplitters )
ClientGUIMenus.AppendSeparator( splitter_menu )
ClientGUIMenus.AppendMenuCheckItem( splitter_menu, 'save current page\'s sash positions on client exit', 'Set whether sash positions should be saved on client exit.', self._new_options.GetBoolean( 'saving_sash_positions_on_exit' ), self._new_options.FlipBoolean, 'saving_sash_positions_on_exit' )
ClientGUIMenus.AppendSeparator( splitter_menu )
ClientGUIMenus.AppendMenuItem( splitter_menu, 'save current page\'s sash positions now', 'Save the current page\'s sash positions.', self._SaveSplitterPositions )
ClientGUIMenus.AppendSeparator( splitter_menu )
ClientGUIMenus.AppendMenuItem( splitter_menu, 'restore all pages\' sash positions to saved value', 'Restore the current sash positions for all pages to the values that are saved.', self._RestoreSplitterPositions )
ClientGUIMenus.AppendMenu( menu, splitter_menu, 'management and preview panels' )
ClientGUIMenus.AppendSeparator( menu )
self._menubar_pages_sessions_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenu( menu, self._menubar_pages_sessions_submenu, 'sessions' )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'pick a new page', 'Choose a new page to open.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_PAGE ) )
#
self._menubar_pages_search_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenu( menu, self._menubar_pages_search_submenu, 'new search page' )
#
self._menubar_pages_petition_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenu( menu, self._menubar_pages_petition_submenu, 'new petition page' )
#
download_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( download_menu, 'url download', 'Open a new tab to download some separate urls.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_URL_DOWNLOADER_PAGE ) )
ClientGUIMenus.AppendMenuItem( download_menu, 'watcher', 'Open a new tab to watch threads or other updating locations.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_WATCHER_DOWNLOADER_PAGE ) )
ClientGUIMenus.AppendMenuItem( download_menu, 'gallery', 'Open a new tab to download from gallery sites.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_GALLERY_DOWNLOADER_PAGE ) )
ClientGUIMenus.AppendMenuItem( download_menu, 'simple downloader', 'Open a new tab to download files from generic galleries or threads.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_SIMPLE_DOWNLOADER_PAGE ) )
ClientGUIMenus.AppendMenu( menu, download_menu, 'new download page' )
#
self._menubar_pages_download_popup_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( self._menubar_pages_download_popup_submenu, 'an ipfs multihash', 'Enter an IPFS multihash and attempt to import whatever is returned.', self._StartIPFSDownload )
ClientGUIMenus.AppendMenu( menu, self._menubar_pages_download_popup_submenu, 'new download popup' )
#
special_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( special_menu, 'page of pages', 'Open a new tab that can hold more tabs.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_PAGE_OF_PAGES ) )
ClientGUIMenus.AppendMenuItem( special_menu, 'duplicates processing', 'Open a new tab to discover and filter duplicate files.', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_DUPLICATE_FILTER_PAGE ) )
ClientGUIMenus.AppendMenu( menu, special_menu, 'new special page' )
#
ClientGUIMenus.AppendSeparator( menu )
special_command_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( special_command_menu, 'clear all multiwatcher highlights', 'Command all multiwatcher pages to clear their highlighted watchers.', HG.client_controller.pub, 'clear_multiwatcher_highlights' )
ClientGUIMenus.AppendMenu( menu, special_command_menu, 'special commands' )
#
return ( menu, '&pages' )
def _InitialiseMenuInfoServices( self ):
menu = QW.QMenu( self )
submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuCheckItem( submenu, 'all repository synchronisation', 'Pause the client\'s synchronisation with hydrus repositories.', HC.options['pause_repo_sync'], self._PausePlaySync, 'repo' )
ClientGUIMenus.AppendMenu( menu, submenu, 'pause' )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'review services', 'Look at the services your client connects to.', self._ReviewServices )
ClientGUIMenus.AppendMenuItem( menu, 'manage services', 'Edit the services your client connects to.', self._ManageServices )
self._menubar_services_admin_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenu( menu, self._menubar_services_admin_submenu, 'administrate services' )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'import repository update files', 'Add repository update files to the database.', self._ImportUpdateFiles )
return ( menu, '&services' )
def _InitialiseMenuInfoTags( self ):
menu = QW.QMenu( self )
ClientGUIMenus.AppendMenuItem( menu, 'migrate tags', 'Migrate tags from one place to another.', self._MigrateTags )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'manage tag display and search', 'Set which tags you want to see from which services.', self._ManageTagDisplay )
ClientGUIMenus.AppendSeparator( menu )
ClientGUIMenus.AppendMenuItem( menu, 'manage tag siblings', 'Set certain tags to be automatically replaced with other tags.', self._ManageTagSiblings )
ClientGUIMenus.AppendMenuItem( menu, 'manage tag parents', 'Set certain tags to be automatically added with other tags.', self._ManageTagParents )
ClientGUIMenus.AppendMenuItem( menu, 'manage where tag siblings and parents apply', 'Set which services\' siblings and parents apply where.', self._ManageTagDisplayApplication )
#
tag_display_maintenance_menu = QW.QMenu( menu )
ClientGUIMenus.AppendMenuItem( tag_display_maintenance_menu, 'review tag sibling/parent maintenance', 'See how siblings and parents are currently applied.', self._ReviewTagDisplayMaintenance )
ClientGUIMenus.AppendSeparator( tag_display_maintenance_menu )
check_manager = ClientGUICommon.CheckboxManagerOptions( 'tag_display_maintenance_during_idle' )
current_value = check_manager.GetCurrentValue()
func = check_manager.Invert
ClientGUIMenus.AppendMenuCheckItem( tag_display_maintenance_menu, 'sync tag display during idle time', 'Control whether tag display maintenance can work during idle time.', current_value, func )
check_manager = ClientGUICommon.CheckboxManagerOptions( 'tag_display_maintenance_during_active' )
current_value = check_manager.GetCurrentValue()
func = check_manager.Invert
ClientGUIMenus.AppendMenuCheckItem( tag_display_maintenance_menu, 'sync tag display during normal time', 'Control whether tag display maintenance can work during normal time.', current_value, func )
ClientGUIMenus.AppendMenu( menu, tag_display_maintenance_menu, 'sibling/parent sync' )
#
return ( menu, '&tags' )
def _InitialiseMenuInfoUndo( self ):
menu = QW.QMenu( self )
self._menubar_undo_undo = ClientGUIMenus.AppendMenuItem( menu, 'initialising', 'Undo last operation.', self._controller.pub, 'undo' )
self._menubar_undo_redo = ClientGUIMenus.AppendMenuItem( menu, 'initialising', 'Redo last operation.', self._controller.pub, 'redo' )
ClientGUIMenus.AppendSeparator( menu )
self._menubar_undo_closed_pages_submenu = QW.QMenu( menu )
ClientGUIMenus.AppendMenu( menu, self._menubar_undo_closed_pages_submenu, 'closed pages' )
return ( menu, '&undo' )
def _InitialiseSession( self ):
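# Decides whether to load the default GUI session or just a blank page. After a bad shutdown the user is asked which
# to do, auto-choosing the default session after 15 seconds. The actual load is deferred briefly onto the Qt event
# loop; afterwards the periodic 'last session' autosave is scheduled and the clipboard watcher is started if needed.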
default_gui_session = HC.options[ 'default_gui_session' ]
existing_session_names = self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER )
cannot_load_from_db = default_gui_session not in existing_session_names
load_a_blank_page = HC.options[ 'default_gui_session' ] == 'just a blank page' or cannot_load_from_db
if not load_a_blank_page:
if self._controller.LastShutdownWasBad():
# this can be upgraded to a nicer checkboxlist dialog to select pages or w/e
message = 'It looks like the last instance of the client did not shut down cleanly.'
message += os.linesep * 2
message += 'Would you like to try loading your default session "' + default_gui_session + '", or just a blank page?'
message += os.linesep * 2
message += 'This will auto-choose to open your default session in 15 seconds.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Previous shutdown was bad', yes_label = 'try to load "' + default_gui_session + '"', no_label = 'just load a blank page', auto_yes_time = 15 )
if result == QW.QDialog.Rejected:
load_a_blank_page = True
def do_it( default_gui_session, load_a_blank_page ):
try:
if load_a_blank_page:
default_location_context = HG.client_controller.services_manager.GetDefaultLocationContext()
self._notebook.NewPageQuery( default_location_context, on_deepest_notebook = True )
else:
self._notebook.LoadGUISession( default_gui_session )
finally:
last_session_save_period_minutes = self._controller.new_options.GetInteger( 'last_session_save_period_minutes' )
#self._controller.CallLaterQtSafe(self, 1.0, 'adjust size', self.adjustSize ) # some i3 thing--doesn't layout main gui on init for some reason
self._controller.CallLaterQtSafe(self, last_session_save_period_minutes * 60, 'auto save session', self.AutoSaveLastSession )
self._BootOrStopClipboardWatcherIfNeeded()
self._controller.ReportFirstSessionLoaded()
self._controller.CallLaterQtSafe( self, 0.25, 'load initial session', do_it, default_gui_session, load_a_blank_page )
def _LockServer( self, service_key, lock ):
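# Sends a lock_on/lock_off request to the given server service on a background thread. Locking is confirmed with the
# user first, since a locked server cannot serve requests until it is unlocked again.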
def do_it( service, lock ):
if lock:
command = 'lock_on'
done_message = 'Server locked!'
else:
command = 'lock_off'
done_message = 'Server unlocked!'
service.Request( HC.POST, command )
HydrusData.ShowText( done_message )
if lock:
message = 'This will tell the server to lock and disconnect its database, in case you wish to make a db backup using an external program. It will not be able to serve any requests as long as it is locked. It may get funky if it is locked for hours and hours--if you need it paused for that long, I recommend just shutting it down instead.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result != QW.QDialog.Accepted:
return
service = self._controller.services_manager.GetService( service_key )
self._controller.CallToThread( do_it, service, lock )
def _STARTManageAccountTypes( self, service_key ):
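# Fetches the admin service's account types off the Qt thread and then hands them to _ManageAccountTypes for editing.
# This follows the AsyncQtJob pattern used throughout this class: work_callable runs on a worker thread, and
# publish_callable (or errback_callable on failure) runs back on the Qt thread.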
admin_service = HG.client_controller.services_manager.GetService( service_key )
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', 'loading account types\u2026' )
self._controller.pub( 'message', job_key )
def work_callable():
response = admin_service.Request( HC.GET, 'account_types' )
account_types = response[ 'account_types' ]
return account_types
def publish_callable( account_types ):
job_key.Delete()
self._ManageAccountTypes( service_key, account_types )
def errback_callable( etype, value, tb ):
HydrusData.ShowText( 'Sorry, unable to load account types:' )
HydrusData.ShowExceptionTuple( etype, value, tb, do_wait = False )
job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable, errback_callable = errback_callable )
job.start()
def _ManageAccountTypes( self, service_key, account_types ):
admin_service = HG.client_controller.services_manager.GetService( service_key )
title = 'edit account types'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
panel = ClientGUIHydrusNetwork.EditAccountTypesPanel( dlg, admin_service.GetServiceType(), account_types )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
( account_types, deletee_account_type_keys_to_new_account_type_keys ) = panel.GetValue()
serialisable_deletee_account_type_keys_to_new_account_type_keys = HydrusSerialisable.SerialisableBytesDictionary( deletee_account_type_keys_to_new_account_type_keys )
def do_it():
admin_service.Request( HC.POST, 'account_types', { 'account_types' : account_types, 'deletee_account_type_keys_to_new_account_type_keys' : serialisable_deletee_account_type_keys_to_new_account_type_keys } )
self._controller.CallToThread( do_it )
def _ManageDefaultTagImportOptions( self ):
title = 'edit default tag import options'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
( file_post_default_tag_import_options, watchable_default_tag_import_options, url_class_keys_to_tag_import_options ) = domain_manager.GetDefaultTagImportOptions()
url_classes = domain_manager.GetURLClasses()
parsers = domain_manager.GetParsers()
url_class_keys_to_parser_keys = domain_manager.GetURLClassKeysToParserKeys()
panel = ClientGUIScrolledPanelsEdit.EditDefaultTagImportOptionsPanel( dlg, url_classes, parsers, url_class_keys_to_parser_keys, file_post_default_tag_import_options, watchable_default_tag_import_options, url_class_keys_to_tag_import_options )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
( file_post_default_tag_import_options, watchable_default_tag_import_options, url_class_keys_to_tag_import_options ) = panel.GetValue()
domain_manager.SetDefaultTagImportOptions( file_post_default_tag_import_options, watchable_default_tag_import_options, url_class_keys_to_tag_import_options )
def _ManageDownloaderDisplay( self ):
title = 'manage downloader display'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
gugs = domain_manager.GetGUGs()
gug_keys_to_display = domain_manager.GetGUGKeysToDisplay()
url_classes = domain_manager.GetURLClasses()
url_class_keys_to_display = domain_manager.GetURLClassKeysToDisplay()
show_unmatched_urls_in_media_viewer = HG.client_controller.new_options.GetBoolean( 'show_unmatched_urls_in_media_viewer' )
panel = ClientGUIDownloaders.EditDownloaderDisplayPanel( dlg, self._controller.network_engine, gugs, gug_keys_to_display, url_classes, url_class_keys_to_display, show_unmatched_urls_in_media_viewer )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
( gug_keys_to_display, url_class_keys_to_display, show_unmatched_urls_in_media_viewer ) = panel.GetValue()
domain_manager.SetGUGKeysToDisplay( gug_keys_to_display )
domain_manager.SetURLClassKeysToDisplay( url_class_keys_to_display )
HG.client_controller.new_options.SetBoolean( 'show_unmatched_urls_in_media_viewer', show_unmatched_urls_in_media_viewer )
def _ManageExportFolders( self ):
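# Edits export folders. The worker thread pauses export folder syncing, waits for any currently running export
# folders to finish, shows the edit dialog on the Qt thread, writes back the edited folders and deletes any that were
# removed, and finally restores the original pause state.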
def qt_do_it():
export_folders = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER )
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit export folders' ) as dlg:
panel = ClientGUIExport.EditExportFoldersPanel( dlg, export_folders )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
export_folders = panel.GetValue()
existing_db_names = set( self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER ) )
good_names = set()
for export_folder in export_folders:
self._controller.Write( 'serialisable', export_folder )
good_names.add( export_folder.GetName() )
names_to_delete = existing_db_names - good_names
for name in names_to_delete:
self._controller.Write( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER, name )
self._controller.pub( 'notify_new_export_folders' )
def THREAD_do_it( controller ):
with self._delayed_dialog_lock:
original_pause_status = controller.options[ 'pause_export_folders_sync' ]
controller.options[ 'pause_export_folders_sync' ] = True
try:
if HG.export_folders_running:
job_key = ClientThreading.JobKey()
try:
job_key.SetVariable( 'popup_text_1', 'Waiting for export folders to finish.' )
controller.pub( 'message', job_key )
while HG.export_folders_running:
time.sleep( 0.1 )
if HG.started_shutdown:
return
finally:
job_key.Delete()
try:
controller.CallBlockingToQt( self, qt_do_it )
except HydrusExceptions.QtDeadWindowException:
pass
finally:
controller.options[ 'pause_export_folders_sync' ] = original_pause_status
controller.pub( 'notify_new_export_folders' )
self._controller.CallToThread( THREAD_do_it, self._controller )
def _ManageGUGs( self ):
title = 'manage gallery url generators'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
gugs = domain_manager.GetGUGs()
panel = ClientGUIDownloaders.EditGUGsPanel( dlg, gugs )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
gugs = panel.GetValue()
domain_manager.SetGUGs( gugs )
def _ManageImportFolders( self ):
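# Same pattern as _ManageExportFolders above, but for import folders.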
def qt_do_it():
import_folders = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER )
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit import folders' ) as dlg:
panel = ClientGUIImport.EditImportFoldersPanel( dlg, import_folders )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
import_folders = panel.GetValue()
existing_db_names = set( self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER ) )
good_names = set()
for import_folder in import_folders:
good_names.add( import_folder.GetName() )
self._controller.Write( 'serialisable', import_folder )
names_to_delete = existing_db_names.difference( good_names )
for name in names_to_delete:
self._controller.Write( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_IMPORT_FOLDER, name )
self._controller.pub( 'notify_new_import_folders' )
def THREAD_do_it( controller ):
with self._delayed_dialog_lock:
original_pause_status = controller.options[ 'pause_import_folders_sync' ]
controller.options[ 'pause_import_folders_sync' ] = True
try:
if HG.import_folders_running:
job_key = ClientThreading.JobKey()
try:
job_key.SetVariable( 'popup_text_1', 'Waiting for import folders to finish.' )
controller.pub( 'message', job_key )
while HG.import_folders_running:
time.sleep( 0.1 )
if HG.started_shutdown:
return
finally:
job_key.Delete()
try:
controller.CallBlockingToQt(self, qt_do_it)
except HydrusExceptions.QtDeadWindowException:
pass
finally:
controller.options[ 'pause_import_folders_sync' ] = original_pause_status
controller.pub( 'notify_new_import_folders' )
self._controller.CallToThread( THREAD_do_it, self._controller )
def _ManageLogins( self ):
title = 'manage logins'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
login_manager = self._controller.network_engine.login_manager
login_scripts = login_manager.GetLoginScripts()
domains_to_login_info = login_manager.GetDomainsToLoginInfo()
panel = ClientGUILogin.EditLoginsPanel( dlg, self._controller.network_engine, login_scripts, domains_to_login_info )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
domains_to_login_info = panel.GetValue()
login_manager.SetDomainsToLoginInfo( domains_to_login_info )
domains_to_login = panel.GetDomainsToLoginAfterOK()
if len( domains_to_login ) > 0:
self._controller.network_engine.ForceLogins( domains_to_login )
def _ManageLoginScripts( self ):
title = 'manage login scripts'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
login_manager = self._controller.network_engine.login_manager
login_scripts = login_manager.GetLoginScripts()
panel = ClientGUILogin.EditLoginScriptsPanel( dlg, login_scripts )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
login_scripts = panel.GetValue()
login_manager.SetLoginScripts( login_scripts )
def _ManageNetworkHeaders( self ):
title = 'manage http headers'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
network_contexts_to_custom_header_dicts = domain_manager.GetNetworkContextsToCustomHeaderDicts()
panel = ClientGUINetwork.EditNetworkContextCustomHeadersPanel( dlg, network_contexts_to_custom_header_dicts )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
network_contexts_to_custom_header_dicts = panel.GetValue()
domain_manager.SetNetworkContextsToCustomHeaderDicts( network_contexts_to_custom_header_dicts )
def _ManageOptions( self ):
title = 'manage options'
frame_key = 'manage_options_dialog'
with ClientGUITopLevelWindowsPanels.DialogManage( self, title, frame_key ) as dlg:
panel = ClientGUIScrolledPanelsManagement.ManageOptionsPanel( dlg )
dlg.SetPanel( panel )
dlg.exec()
qt_style_name = self._controller.new_options.GetNoneableString( 'qt_style_name' )
qt_stylesheet_name = self._controller.new_options.GetNoneableString( 'qt_stylesheet_name' )
try:
if qt_style_name is None:
ClientGUIStyle.SetStyleFromName( ClientGUIStyle.ORIGINAL_STYLE_NAME )
else:
ClientGUIStyle.SetStyleFromName( qt_style_name )
except Exception as e:
HydrusData.ShowException( e )
try:
if qt_stylesheet_name is None:
ClientGUIStyle.ClearStylesheet()
else:
ClientGUIStyle.SetStylesheetFromPath( qt_stylesheet_name )
except Exception as e:
HydrusData.ShowException( e )
ClientGUIFunctions.UpdateAppDisplayName()
self._controller.pub( 'wake_daemons' )
self.SetStatusBarDirty()
self._controller.pub( 'refresh_page_name' )
self._controller.pub( 'notify_new_colourset' )
self._controller.pub( 'notify_new_favourite_tags' )
self._UpdateSystemTrayIcon()
def _ManageParsers( self ):
title = 'manage parsers'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
parsers = domain_manager.GetParsers()
panel = ClientGUIParsing.EditParsersPanel( dlg, parsers )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
parsers = panel.GetValue()
domain_manager.SetParsers( parsers )
domain_manager.TryToLinkURLClassesAndParsers()
def _ManageParsingScripts( self ):
title = 'manage parsing scripts'
with ClientGUITopLevelWindowsPanels.DialogManage( self, title ) as dlg:
panel = ClientGUIParsing.ManageParsingScriptsPanel( dlg )
dlg.SetPanel( panel )
dlg.exec()
def _ManageServer( self, service_key ):
title = 'manage server services'
with ClientGUITopLevelWindowsPanels.DialogManage( self, title ) as dlg:
panel = ClientGUIServersideServices.ManageServerServicesPanel( dlg, service_key )
dlg.SetPanel( panel )
dlg.exec()
def _ManageServices( self, auto_account_creation_service_key = None ):
original_pause_status = HC.options[ 'pause_repo_sync' ]
HC.options[ 'pause_repo_sync' ] = True
try:
title = 'manage services'
with ClientGUITopLevelWindowsPanels.DialogManage( self, title ) as dlg:
panel = ClientGUIClientsideServices.ManageClientServicesPanel( dlg, auto_account_creation_service_key = auto_account_creation_service_key )
dlg.SetPanel( panel )
dlg.exec()
finally:
HC.options[ 'pause_repo_sync' ] = original_pause_status
def _ManageServiceOptionsNullificationPeriod( self, service_key ):
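# Lets an admin edit the service's anonymisation (nullification) period with a time delta control, validates it
# against the allowed maximum, uploads the new value asynchronously, and then flags the account for refresh so the
# updated options are picked up.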
service = self._controller.services_manager.GetService( service_key )
nullification_period = service.GetNullificationPeriod()
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit anonymisation period' ) as dlg:
panel = ClientGUIScrolledPanels.EditSingleCtrlPanel( dlg )
height_num_chars = 20
control = ClientGUITime.TimeDeltaCtrl( panel, min = HydrusNetwork.MIN_NULLIFICATION_PERIOD, days = True, hours = True, minutes = True, seconds = True )
control.SetValue( nullification_period )
panel.SetControl( control )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
nullification_period = control.GetValue()
if nullification_period > HydrusNetwork.MAX_NULLIFICATION_PERIOD:
QW.QMessageBox.information( self, 'Information', 'Sorry, the value you entered was too high. The max is {}.'.format( HydrusData.TimeDeltaToPrettyTimeDelta( HydrusNetwork.MAX_NULLIFICATION_PERIOD ) ) )
return
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'setting anonymisation period' )
job_key.SetVariable( 'popup_text_1', 'uploading\u2026' )
self._controller.pub( 'message', job_key )
def work_callable():
service.Request( HC.POST, 'options_nullification_period', { 'nullification_period' : nullification_period } )
return 1
def publish_callable( gumpf ):
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.Finish()
service.SetAccountRefreshDueNow()
def errback_ui_cleanup_callable():
job_key.SetVariable( 'popup_text_1', 'error!' )
job_key.Finish()
job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable, errback_ui_cleanup_callable = errback_ui_cleanup_callable )
job.start()
def _ManageServiceOptionsUpdatePeriod( self, service_key ):
service = self._controller.services_manager.GetService( service_key )
update_period = service.GetUpdatePeriod()
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit update period' ) as dlg:
panel = ClientGUIScrolledPanels.EditSingleCtrlPanel( dlg )
height_num_chars = 20
control = ClientGUITime.TimeDeltaCtrl( panel, min = HydrusNetwork.MIN_UPDATE_PERIOD, days = True, hours = True, minutes = True, seconds = True )
control.SetValue( update_period )
panel.SetControl( control )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
update_period = control.GetValue()
if update_period > HydrusNetwork.MAX_UPDATE_PERIOD:
QW.QMessageBox.information( self, 'Information', 'Sorry, the value you entered was too high. The max is {}.'.format( HydrusData.TimeDeltaToPrettyTimeDelta( HydrusNetwork.MAX_UPDATE_PERIOD ) ) )
return
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'setting update period' )
job_key.SetVariable( 'popup_text_1', 'uploading\u2026' )
self._controller.pub( 'message', job_key )
def work_callable():
service.Request( HC.POST, 'options_update_period', { 'update_period' : update_period } )
return 1
def publish_callable( gumpf ):
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.Finish()
service.DoAFullMetadataResync()
service.SetAccountRefreshDueNow()
def errback_ui_cleanup_callable():
job_key.SetVariable( 'popup_text_1', 'error!' )
job_key.Finish()
job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable, errback_ui_cleanup_callable = errback_ui_cleanup_callable )
job.start()
def _ManageSubscriptions( self ):
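# Edits subscriptions. The worker thread pauses subscription work and waits for it to stop, compares the query log
# containers the subscriptions expect against what is actually in the db (repairing missing ones and backing up and
# deleting surplus ones, with the user's permission), runs the edit dialog on the Qt thread, then saves all changes
# in one atomic write before resuming subscriptions.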
def qt_do_it( subscriptions, missing_query_log_container_names, surplus_query_log_container_names ):
if len( missing_query_log_container_names ) > 0:
text = '{} subscription queries had missing database data! This is a serious error!'.format( HydrusData.ToHumanInt( len( missing_query_log_container_names ) ) )
text += os.linesep * 2
text += 'If you continue, the client will now create and save empty file/search logs for those queries, essentially resetting them, but if you know you need to exit and fix your database in a different way, cancel out now.'
text += os.linesep * 2
text += 'If you do not know why this happened, you may have had a hard drive fault. Please consult "install_dir/db/help my db is broke.txt", and you may want to contact hydrus dev.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Missing Query Logs!', yes_label = 'continue', no_label = 'back out' )
if result == QW.QDialog.Accepted:
from hydrus.client.importing import ClientImportSubscriptionQuery
for missing_query_log_container_name in missing_query_log_container_names:
query_log_container = ClientImportSubscriptionQuery.SubscriptionQueryLogContainer( missing_query_log_container_name )
HG.client_controller.WriteSynchronous( 'serialisable', query_log_container )
for subscription in subscriptions:
for query_header in subscription.GetQueryHeaders():
if query_header.GetQueryLogContainerName() in missing_query_log_container_names:
query_header.Reset( query_log_container )
HG.client_controller.subscriptions_manager.SetSubscriptions( subscriptions ) # save the reset
else:
raise HydrusExceptions.CancelledException()
if len( surplus_query_log_container_names ) > 0:
text = 'When loading subscription data, the client discovered surplus orphaned subscription data for {} queries! This data is harmless and no longer used. The situation is however unusual, and probably due to an unusual deletion routine or a bug.'.format( HydrusData.ToHumanInt( len( surplus_query_log_container_names ) ) )
text += os.linesep * 2
text += 'If you continue, this surplus data will be backed up to your database directory and then safely deleted from the database itself, but if you recently did manual database editing and know you need to exit and fix your database in a different way, cancel out now.'
text += os.linesep * 2
text += 'If you do not know why this happened, hydrus dev would be interested in being told about it and the surrounding circumstances.'
result = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Orphan Query Logs!', yes_label = 'continue', no_label = 'back out' )
if result == QW.QDialog.Accepted:
sub_dir = os.path.join( self._controller.GetDBDir(), 'orphaned_query_log_containers' )
HydrusPaths.MakeSureDirectoryExists( sub_dir )
for surplus_query_log_container_name in surplus_query_log_container_names:
surplus_query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, surplus_query_log_container_name )
backup_path = os.path.join( sub_dir, 'qlc_{}.json'.format( surplus_query_log_container_name ) )
with open( backup_path, 'w', encoding = 'utf-8' ) as f:
f.write( surplus_query_log_container.DumpToString() )
HG.client_controller.WriteSynchronous( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, surplus_query_log_container_name )
else:
raise HydrusExceptions.CancelledException()
title = 'manage subscriptions'
frame_key = 'manage_subscriptions_dialog'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title, frame_key ) as dlg:
panel = ClientGUISubscriptions.EditSubscriptionsPanel( dlg, subscriptions )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
( subscriptions, edited_query_log_containers, deletee_query_log_container_names ) = panel.GetValue()
return ( subscriptions, edited_query_log_containers, deletee_query_log_container_names )
else:
raise HydrusExceptions.CancelledException()
def THREAD_do_it( controller ):
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', 'Waiting for current subscription work to finish.' )
controller.pub( 'message', job_key )
with self._delayed_dialog_lock:
try:
try:
HG.client_controller.subscriptions_manager.PauseSubscriptionsForEditing()
while HG.client_controller.subscriptions_manager.SubscriptionsRunning():
time.sleep( 0.1 )
if HG.started_shutdown:
return
finally:
job_key.Delete()
subscriptions = HG.client_controller.subscriptions_manager.GetSubscriptions()
expected_query_log_container_names = set()
for subscription in subscriptions:
expected_query_log_container_names.update( subscription.GetAllQueryLogContainerNames() )
actual_query_log_container_names = set( HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER ) )
missing_query_log_container_names = expected_query_log_container_names.difference( actual_query_log_container_names )
surplus_query_log_container_names = actual_query_log_container_names.difference( expected_query_log_container_names )
try:
done_job_key = ClientThreading.JobKey()
( subscriptions, edited_query_log_containers, deletee_query_log_container_names ) = controller.CallBlockingToQt( self, qt_do_it, subscriptions, missing_query_log_container_names, surplus_query_log_container_names )
done_job_key.SetVariable( 'popup_text_1', 'Saving subscription changes.' )
controller.pub( 'message', done_job_key )
HG.client_controller.WriteSynchronous(
'serialisable_atomic',
overwrite_types_and_objs = ( [ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ], subscriptions ),
set_objs = edited_query_log_containers,
deletee_types_to_names = { HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER : deletee_query_log_container_names }
)
HG.client_controller.subscriptions_manager.SetSubscriptions( subscriptions )
except HydrusExceptions.QtDeadWindowException:
pass
except HydrusExceptions.CancelledException:
HG.client_controller.subscriptions_manager.Wake()
finally:
done_job_key.Delete()
finally:
HG.client_controller.subscriptions_manager.ResumeSubscriptionsAfterEditing()
self._controller.CallToThread( THREAD_do_it, self._controller )
def _ManageTagDisplay( self ):
title = 'manage tag display and search'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
panel = ClientGUITags.EditTagDisplayManagerPanel( dlg, self._controller.tag_display_manager )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
tag_display_manager = panel.GetValue()
tag_display_manager.SetDirty()
self._controller.tag_display_manager = tag_display_manager
self._controller.pub( 'notify_new_tag_display_rules' )
def _ManageTagDisplayApplication( self ):
title = 'manage where tag siblings and parents apply'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ) = self._controller.Read( 'tag_display_application' )
panel = ClientGUITags.EditTagDisplayApplication( dlg, master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
( edited_master_service_keys_to_sibling_applicable_service_keys, edited_master_service_keys_to_parent_applicable_service_keys ) = panel.GetValue()
self._controller.Write( 'tag_display_application', edited_master_service_keys_to_sibling_applicable_service_keys, edited_master_service_keys_to_parent_applicable_service_keys )
def _ManageTagParents( self ):
with ClientGUITopLevelWindowsPanels.DialogManage( self, 'manage tag parents' ) as dlg:
panel = ClientGUITags.ManageTagParents( dlg )
dlg.SetPanel( panel )
dlg.exec()
def _ManageTagSiblings( self ):
with ClientGUITopLevelWindowsPanels.DialogManage( self, 'manage tag siblings' ) as dlg:
panel = ClientGUITags.ManageTagSiblings( dlg )
dlg.SetPanel( panel )
dlg.exec()
def _ManageURLClasses( self ):
title = 'manage url classes'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
url_classes = domain_manager.GetURLClasses()
panel = ClientGUIDownloaders.EditURLClassesPanel( dlg, url_classes )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
url_classes = panel.GetValue()
domain_manager.SetURLClasses( url_classes )
domain_manager.TryToLinkURLClassesAndParsers()
def _ManageURLClassLinks( self ):
title = 'manage url class links'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
domain_manager = self._controller.network_engine.domain_manager
url_classes = domain_manager.GetURLClasses()
parsers = domain_manager.GetParsers()
url_class_keys_to_parser_keys = domain_manager.GetURLClassKeysToParserKeys()
panel = ClientGUIDownloaders.EditURLClassLinksPanel( dlg, self._controller.network_engine, url_classes, parsers, url_class_keys_to_parser_keys )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
url_class_keys_to_parser_keys = panel.GetValue()
domain_manager.SetURLClassKeysToParserKeys( url_class_keys_to_parser_keys )
def _ManageUPnP( self ):
with ClientGUIDialogsManage.DialogManageUPnP( self ) as dlg: dlg.exec()
def _MigrateDatabase( self ):
with ClientGUITopLevelWindowsPanels.DialogNullipotent( self, 'migrate database' ) as dlg:
panel = ClientGUIScrolledPanelsReview.MigrateDatabasePanel( dlg, self._controller )
dlg.SetPanel( panel )
dlg.exec()
self._menu_updater_database.update()
def _MigrateTags( self ):
default_tag_service_key = self._controller.new_options.GetKey( 'default_tag_service_tab' )
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'migrate tags' )
panel = ClientGUIScrolledPanelsReview.MigrateTagsPanel( frame, default_tag_service_key )
frame.SetPanel( panel )
def _ModifyAccount( self, service_key ):
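# Prompts for an account id in hex and, if it parses, opens the modify accounts panel for that single account on the
# given service.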
service = self._controller.services_manager.GetService( service_key )
with ClientGUIDialogs.DialogTextEntry( self, 'Enter the account id for the account to be modified.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
try:
account_key = bytes.fromhex( dlg.GetValue() )
except:
QW.QMessageBox.critical( self, 'Error', 'Could not parse that account id' )
return
subject_account_identifiers = [ HydrusNetwork.AccountIdentifier( account_key = account_key ) ]
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'manage accounts' )
panel = ClientGUIHydrusNetwork.ModifyAccountsPanel( frame, service_key, subject_account_identifiers )
frame.SetPanel( panel )
def _OpenDBFolder( self ):
HydrusPaths.LaunchDirectory( self._controller.GetDBDir() )
def _OpenExportFolder( self ):
export_path = ClientExporting.GetExportPath()
if export_path is None:
HydrusData.ShowText( 'Unfortunately, your export path could not be determined!' )
else:
HydrusPaths.LaunchDirectory( export_path )
def _OpenInstallFolder( self ):
HydrusPaths.LaunchDirectory( HC.BASE_DIR )
def _PausePlaySync( self, sync_type ):
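# Flips the relevant pause option ('repo', 'export_folders' or 'import_folders'), notifies the matching daemon so it
# can restart, and saves the options.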
if sync_type == 'repo':
HC.options[ 'pause_repo_sync' ] = not HC.options[ 'pause_repo_sync' ]
self._controller.pub( 'notify_restart_repo_sync' )
elif sync_type == 'export_folders':
HC.options[ 'pause_export_folders_sync' ] = not HC.options[ 'pause_export_folders_sync' ]
self._controller.pub( 'notify_restart_export_folders_daemon' )
elif sync_type == 'import_folders':
HC.options[ 'pause_import_folders_sync' ] = not HC.options[ 'pause_import_folders_sync' ]
self._controller.pub( 'notify_restart_import_folders_daemon' )
self._controller.Write( 'save_options', HC.options )
def _Refresh( self ):
page = self._notebook.GetCurrentMediaPage()
if page is not None:
page.RefreshQuery()
def _RefreshStatusBar( self ):
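# Updates the status bar cells: 0 = current page's media status, 2 = idle, 3 = hydrus thread pool busyness,
# 4 = system CPU busy, 5 = db status. Cell 1 is not set here and is presumably managed elsewhere.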
page = self._notebook.GetCurrentMediaPage()
if page is None:
media_status = ''
else:
media_status = page.GetPrettyStatus()
if self._controller.CurrentlyIdle():
idle_status = 'idle'
idle_tooltip = 'client is idle, it can do maintenance work'
else:
idle_status = ''
idle_tooltip = None
hydrus_busy_status = self._controller.GetThreadPoolBusyStatus()
hydrus_busy_tooltip = 'just a simple measure of how much hydrus wants to do atm'
if self._controller.SystemBusy():
busy_status = 'CPU busy'
busy_tooltip = 'this computer has been doing work recently, so some hydrus maintenance will not start'
else:
busy_status = ''
busy_tooltip = None
( db_status, job_name ) = HG.client_controller.GetDBStatus()
if job_name is not None and job_name != '':
db_tooltip = 'current db job: {}'.format( job_name )
else:
db_tooltip = None
self._statusbar.setToolTip( job_name )
self._statusbar.SetStatusText( media_status, 0 )
self._statusbar.SetStatusText( idle_status, 2, tooltip = idle_tooltip )
self._statusbar.SetStatusText( hydrus_busy_status, 3, tooltip = hydrus_busy_tooltip )
self._statusbar.SetStatusText( busy_status, 4, tooltip = busy_tooltip )
self._statusbar.SetStatusText( db_status, 5, tooltip = db_tooltip )
def _RegenerateTagCache( self ):
message = 'This will delete and then recreate the fast search cache for one or all tag services.'
message += os.linesep * 2
message += 'If you have a lot of tags and files, it can take a little while, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless. It only fixes missing autocomplete or tag search results.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_tag_cache', tag_service_key = tag_service_key )
def _RegenerateLocalHashCache( self ):
message = 'This will delete and then recreate the local hash cache, which keeps a small record of hashes for files on your hard drive. It isn\'t super important, but it speeds most operations up, and this routine fixes it when broken.'
message += os.linesep * 2
message += 'If you have a lot of files, it can take a long time, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'regenerate_local_hash_cache' )
def _RegenerateLocalTagCache( self ):
message = 'This will delete and then recreate the local tag cache, which keeps a small record of tags for files on your hard drive. It isn\'t super important, but it speeds most operations up, and this routine fixes it when broken.'
message += os.linesep * 2
message += 'If you have a lot of tags and files, it can take a long time, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'regenerate_local_tag_cache' )
def _RegenerateTagDisplayMappingsCache( self ):
message = 'This will delete and then recreate the tag \'display\' mappings cache, which is used for user-presented tag searching, loading, and autocomplete counts. This is useful if miscounting (particularly related to siblings/parents) has somehow occurred.'
message += os.linesep * 2
message += 'If you have a lot of tags and files, it can take a long time, during which the gui may hang. All siblings and parents will have to be resynced.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_tag_display_mappings_cache', tag_service_key = tag_service_key )
def _RegenerateTagDisplayPendingMappingsCache( self ):
message = 'This will delete and then recreate the pending tags on the tag \'display\' mappings cache, which is used for user-presented tag searching, loading, and autocomplete counts. This is useful if you have \'ghost\' pending tags or counts hanging around.'
message += os.linesep * 2
message += 'If you have millions of tags, pending or current, it can take a long time, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_tag_display_pending_mappings_cache', tag_service_key = tag_service_key )
def _RegenerateTagMappingsCache( self ):
message = 'WARNING: Do not run this for no reason! On a large database, this could take hours to finish!'
message += os.linesep * 2
message += 'This will delete and then recreate the entire tag \'storage\' mappings cache, which is used for tag calculation based on actual values and autocomplete counts in editing contexts like _manage tags_. This is useful if miscounting has somehow occurred.'
message += os.linesep * 2
message += 'If you have a lot of tags and files, it can take a long time, during which the gui may hang. It necessarily involves a regeneration of the tag display mappings cache, which relies on the storage cache, and the tag text search cache. All siblings and parents will have to be resynced.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_tag_mappings_cache', tag_service_key = tag_service_key )
def _RegenerateTagPendingMappingsCache( self ):
message = 'This will delete and then recreate the pending tags on the whole tag mappings cache, which is used for multiple kinds of tag searching, loading, and autocomplete counts. This is useful if you have \'ghost\' pending tags or counts hanging around.'
message += os.linesep * 2
message += 'If you have millions of tags, pending or current, it can take a long time, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_tag_pending_mappings_cache', tag_service_key = tag_service_key )
def _RegenerateSimilarFilesTree( self ):
message = 'This will delete and then recreate the similar files search tree. This is useful if it has somehow become unbalanced and similar files searches are running slow.'
message += os.linesep * 2
message += 'If you have a lot of files, it can take a little while, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
( result, was_cancelled ) = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it', check_for_cancelled = True )
if result == QW.QDialog.Accepted:
self._controller.Write( 'regenerate_similar_files' )
def _RegenerateTagCacheSearchableSubtagsMaps( self ):
message = 'This will regenerate the fast search cache\'s \'unusual character logic\' lookup map, for one or all tag services.'
message += os.linesep * 2
message += 'If you have a lot of tags, it can take a little while, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless. It only fixes missing autocomplete search results.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'regenerate_searchable_subtag_maps', tag_service_key = tag_service_key )
def _RegenerateTagParentsLookupCache( self ):
message = 'This will delete and then recreate the tag parents lookup cache, which is used for all basic tag parents operations. This is useful if it has become damaged or otherwise desynchronised.'
message += os.linesep * 2
message += 'It should only take a second or two.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'regenerate_tag_parents_cache' )
def _RegenerateTagSiblingsLookupCache( self ):
message = 'This will delete and then recreate the tag siblings lookup cache, which is used for all basic tag sibling operations. This is useful if it has become damaged or otherwise desynchronised.'
message += os.linesep * 2
message += 'It should only take a second or two. It necessarily involves a regeneration of the tag parents lookup cache.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
self._controller.Write( 'regenerate_tag_siblings_and_parents_cache' )
def _RepairInvalidTags( self ):
message = 'This will scan all your tags and repair any that are invalid. This might mean taking out unrenderable characters or cleaning up improper whitespace. If there is a tag collision once cleaned, it may add a (1)-style number on the end.'
message += os.linesep * 2
message += 'If you have a lot of tags, it can take a long time, during which the gui may hang. If it finds bad tags, you should restart the program once it is complete.'
message += os.linesep * 2
message += 'If you have not had tag rendering problems, there is no reason to run this.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetStatusTitle( 'repairing invalid tags' )
self._controller.pub( 'message', job_key )
self._controller.Write( 'repair_invalid_tags', job_key = job_key )
def _RepopulateMappingsTables( self ):
message = 'WARNING: Do not run this for no reason!'
message += os.linesep * 2
message += 'If you have significant local tags (e.g. \'my tags\') storage, recently had a \'malformed\' client.mappings.db file, and have since gone through clone/repair and now have a truncated file, this routine will attempt to recover missing tags from the smaller tag cache stored in client.caches.db.'
message += os.linesep * 2
message += 'It can only recover tags for files currently stored by your client. It will take some time, during which the gui may hang. Once it is done, you probably want to regenerate your tag mappings cache, so that you are completely synced again.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'I have a reason to run this, let\'s do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_text_title', 'repopulating mapping tables' )
self._controller.pub( 'modal_message', job_key )
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'repopulate_mappings_from_cache', tag_service_key = tag_service_key, job_key = job_key )
def _RepopulateTagCacheMissingSubtags( self ):
message = 'This will repopulate the fast search cache\'s subtag search, filling in missing entries, for one or all tag services.'
message += os.linesep * 2
message += 'If you have a lot of tags and files, it can take a little while, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless. It only fixes missing autocomplete or tag search results.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'repopulate_tag_cache_missing_subtags', tag_service_key = tag_service_key )
def _RepopulateTagDisplayMappingsCache( self ):
message = 'This will go through your mappings cache and fill in any missing files. It is radically faster than a full regen, and adds siblings and parents instantly, but it only solves the problem of missing file rows.'
message += os.linesep * 2
message += 'If you have millions of tags, pending or current, it can take a long time, during which the gui may hang.'
message += os.linesep * 2
message += 'If you do not have a specific reason to run this, it is pointless.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it--now choose which service', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
try:
tag_service_key = GetTagServiceKeyForMaintenance( self )
except HydrusExceptions.CancelledException:
return
self._controller.Write( 'repopulate_tag_display_mappings_cache', tag_service_key = tag_service_key )
def _RestoreSplitterPositions( self ):
self._controller.pub( 'set_splitter_positions', HC.options[ 'hpos' ], HC.options[ 'vpos' ] )
def _STARTReviewAllAccounts( self, service_key ):
admin_service = HG.client_controller.services_manager.GetService( service_key )
job_key = ClientThreading.JobKey()
job_key.SetVariable( 'popup_text_1', 'loading accounts\u2026' )
self._controller.pub( 'message', job_key )
def work_callable():
response = admin_service.Request( HC.GET, 'all_accounts' )
accounts = response[ 'accounts' ]
return accounts
def publish_callable( accounts ):
job_key.Delete()
self._ReviewAllAccounts( service_key, accounts )
def errback_callable( etype, value, tb ):
HydrusData.ShowText( 'Sorry, unable to load accounts:' )
HydrusData.ShowExceptionTuple( etype, value, tb, do_wait = False )
job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable, errback_callable = errback_callable )
job.start()
def _ReviewAllAccounts( self, service_key, accounts ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'all accounts' )
panel = ClientGUIHydrusNetwork.ListAccountsPanel( frame, service_key, accounts )
frame.SetPanel( panel )
def _ReviewBandwidth( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review bandwidth use and edit rules' )
panel = ClientGUINetwork.ReviewAllBandwidthPanel( frame, self._controller )
frame.SetPanel( panel )
def _ReviewFileMaintenance( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'file maintenance' )
panel = ClientGUIScrolledPanelsReview.ReviewFileMaintenance( frame, self._controller )
frame.SetPanel( panel )
def _ReviewNetworkJobs( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review network jobs' )
panel = ClientGUINetwork.ReviewNetworkJobs( frame, self._controller )
frame.SetPanel( panel )
def _ReviewNetworkSessions( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review session cookies' )
panel = ClientGUINetwork.ReviewNetworkSessionsPanel( frame, self._controller.network_engine.session_manager )
frame.SetPanel( panel )
def _ReviewServices( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review services', 'review_services' )
panel = ClientGUIClientsideServices.ReviewServicesPanel( frame, self._controller )
frame.SetPanel( panel )
def _ReviewTagDisplayMaintenance( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'tag display maintenance' )
panel = ClientGUITags.ReviewTagDisplayMaintenancePanel( frame )
frame.SetPanel( panel )
def _ReviewThreads( self ):
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review threads' )
panel = ClientGUIScrolledPanelsReview.ReviewThreads( frame, self._controller )
frame.SetPanel( panel )
def _ReviewVacuumData( self ):
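# Reads vacuum data from the db asynchronously, showing a cancellable loading popup, then opens the review panel
# unless the popup was cancelled in the meantime.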
job_key = ClientThreading.JobKey( cancellable = True )
def work_callable():
vacuum_data = self._controller.Read( 'vacuum_data' )
return vacuum_data
def publish_callable( vacuum_data ):
if job_key.IsCancelled():
return
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self, 'review vacuum data' )
panel = ClientGUIScrolledPanelsReview.ReviewVacuumData( frame, self._controller, vacuum_data )
frame.SetPanel( panel )
job_key.Delete()
job_key.SetVariable( 'popup_text_1', 'loading database data' )
self._controller.pub( 'message', job_key )
job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable )
job.start()
def _RunExportFolder( self, name = None ):
if self._controller.options[ 'pause_export_folders_sync' ]:
HydrusData.ShowText( 'Export folders are currently paused under the \'file\' menu. Please unpause them and try this again.' )
return
if name is None:
export_folders = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER )
else:
export_folder = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_EXPORT_FOLDER, name )
export_folders = [ export_folder ]
for export_folder in export_folders:
export_folder.RunNow()
self._controller.WriteSynchronous( 'serialisable', export_folder )
self._controller.pub( 'notify_new_export_folders' )
def _RunClientAPITest( self ):
# this is not to be a comprehensive test of client api functions, but a holistic sanity check to make sure everything is wired up right at UI level, with a live functioning client
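# Rough flow: make sure the Client API service is running (temporarily forcing port 6666 if it was off), register a
# temporary access key with all permissions, then exercise /api_version, /add_urls/add_url, /manage_pages/get_pages,
# /manage_pages/get_page_info, /get_files/file_metadata and /manage_pages/add_files against a live page. The
# temporary access key is deleted again in the finally block.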
from hydrus.client import ClientAPI
def do_it():
# job key
client_api_service = HG.client_controller.services_manager.GetService( CC.CLIENT_API_SERVICE_KEY )
port = client_api_service.GetPort()
was_running_before = port is not None
if not was_running_before:
port = 6666
client_api_service._port = port
HG.client_controller.RestartClientServerServices()
time.sleep( 5 )
#
api_permissions = ClientAPI.APIPermissions( name = 'hydrus test access', basic_permissions = list( ClientAPI.ALLOWED_PERMISSIONS ), search_tag_filter = HydrusTags.TagFilter() )
access_key = api_permissions.GetAccessKey()
HG.client_controller.client_api_manager.AddAccess( api_permissions )
#
try:
job_key = ClientThreading.JobKey()
job_key.SetStatusTitle( 'client api test' )
HG.client_controller.pub( 'message', job_key )
import requests
import json
s = requests.Session()
s.verify = False
s.headers[ 'Hydrus-Client-API-Access-Key' ] = access_key.hex()
s.headers[ 'Content-Type' ] = 'application/json'
if client_api_service.UseHTTPS():
schema = 'https'
else:
schema = 'http'
api_base = '{}://127.0.0.1:{}'.format( schema, port )
#
r = s.get( '{}/api_version'.format( api_base ) )
j = r.json()
if j[ 'version' ] != HC.CLIENT_API_VERSION:
HydrusData.ShowText( 'version incorrect!: {}, {}'.format( j[ 'version' ], HC.CLIENT_API_VERSION ) )
#
job_key.SetVariable( 'popup_text_1', 'add url test' )
local_tag_services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, ) )
local_tag_service = random.choice( local_tag_services )
local_tag_service_name = local_tag_service.GetName()
samus_url = 'https://safebooru.org/index.php?page=post&s=view&id=3195917'
samus_hash_hex = '78f92ba4a786225ee2a1236efa6b7dc81dd729faf4af99f96f3e20bad6d8b538'
samus_test_tag = 'client api test tag'
samus_test_tag_filterable = 'client api test tag filterable'
destination_page_name = 'client api test'
request_args = {}
request_args[ 'url' ] = samus_url
request_args[ 'destination_page_name' ] = destination_page_name
request_args[ 'service_names_to_additional_tags' ] = {
local_tag_service_name : [ samus_test_tag ]
}
request_args[ 'filterable_tags' ] = [
samus_test_tag_filterable
]
data = json.dumps( request_args )
r = s.post( '{}/add_urls/add_url'.format( api_base ), data = data )
time.sleep( 0.25 )
#
job_key.SetVariable( 'popup_text_1', 'get session test' )
def get_client_api_page():
r = s.get( '{}/manage_pages/get_pages'.format( api_base ) )
pages_to_process = [ r.json()[ 'pages' ] ]
pages = []
while len( pages_to_process ) > 0:
page_to_process = pages_to_process.pop()
if page_to_process[ 'page_type' ] == ClientGUIManagement.MANAGEMENT_TYPE_PAGE_OF_PAGES:
pages_to_process.extend( page_to_process[ 'pages' ] )
else:
pages.append( page_to_process )
for page in pages:
if page[ 'name' ] == destination_page_name:
return page
client_api_page = get_client_api_page()
if client_api_page is None:
raise Exception( 'Could not find download page!' )
destination_page_key_hex = client_api_page[ 'page_key' ]
def get_hash_ids():
r = s.get( '{}/manage_pages/get_page_info?page_key={}'.format( api_base, destination_page_key_hex ) )
hash_ids = r.json()[ 'page_info' ][ 'media' ][ 'hash_ids' ]
return hash_ids
hash_ids = get_hash_ids()
if len( hash_ids ) == 0:
time.sleep( 3 )
hash_ids = get_hash_ids()
if len( hash_ids ) == 0:
raise Exception( 'The download page had no hashes!' )
#
def get_hash_ids_to_hashes_and_tag_info():
r = s.get( '{}/get_files/file_metadata?file_ids={}'.format( api_base, json.dumps( hash_ids ) ) )
hash_ids_to_hashes_and_tag_info = {}
for item in r.json()[ 'metadata' ]:
hash_ids_to_hashes_and_tag_info[ item[ 'file_id' ] ] = ( item[ 'hash' ], item[ 'service_names_to_statuses_to_tags' ] )
return hash_ids_to_hashes_and_tag_info
hash_ids_to_hashes_and_tag_info = get_hash_ids_to_hashes_and_tag_info()
samus_hash_id = None
for ( hash_id, ( hash_hex, tag_info ) ) in hash_ids_to_hashes_and_tag_info.items():
if hash_hex == samus_hash_hex:
samus_hash_id = hash_id
if samus_hash_id is None:
raise Exception( 'Could not find the samus hash!' )
samus_tag_info = hash_ids_to_hashes_and_tag_info[ samus_hash_id ][1]
if samus_test_tag not in samus_tag_info[ local_tag_service_name ][ str( HC.CONTENT_STATUS_CURRENT ) ]:
raise Exception( 'Did not have the tag!' )
#
def qt_session_gubbins():
self.ProposeSaveGUISession( CC.LAST_SESSION_SESSION_NAME )
page = self._notebook.GetPageFromPageKey( bytes.fromhex( destination_page_key_hex ) )
self._notebook.ShowPage( page )
self._notebook.CloseCurrentPage()
self.ProposeSaveGUISession( CC.LAST_SESSION_SESSION_NAME )
location_context = ClientLocation.LocationContext.STATICCreateSimple( CC.COMBINED_LOCAL_FILE_SERVICE_KEY )
page = self._notebook.NewPageQuery( location_context )
return page.GetPageKey()
page_key = HG.client_controller.CallBlockingToQt( HG.client_controller.gui, qt_session_gubbins )
#
request_args = {}
request_args[ 'page_key' ] = page_key.hex()
request_args[ 'hashes' ] = [ '78f92ba4a786225ee2a1236efa6b7dc81dd729faf4af99f96f3e20bad6d8b538' ]
data = json.dumps( request_args )
r = s.post( '{}/manage_pages/add_files'.format( api_base ), data = data )
time.sleep( 0.25 )
r = s.post( '{}/manage_pages/add_files'.format( api_base ), data = data )
time.sleep( 0.25 )
finally:
#
HG.client_controller.client_api_manager.DeleteAccess( ( access_key, ) )
#
if not was_running_before:
client_api_service._port = None
HG.client_controller.RestartClientServerServices()
job_key.Delete()
HG.client_controller.CallToThread( do_it )
def _RunUITest( self ):
def qt_open_pages():
page_of_pages = self._notebook.NewPagesNotebook( on_deepest_notebook = False, select_page = True )
t = 0.25
default_location_context = HG.client_controller.services_manager.GetDefaultLocationContext()
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self._notebook.NewPageQuery, default_location_context, page_name = 'test', on_deepest_notebook = True )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_PAGE_OF_PAGES ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', page_of_pages.NewPageQuery, default_location_context, page_name ='test', on_deepest_notebook = False )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_DUPLICATE_FILTER_PAGE ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_GALLERY_DOWNLOADER_PAGE ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_SIMPLE_DOWNLOADER_PAGE ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_URL_DOWNLOADER_PAGE ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_NEW_WATCHER_DOWNLOADER_PAGE ) )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProposeSaveGUISession, CC.LAST_SESSION_SESSION_NAME )
return page_of_pages
def qt_close_unclose_one_page():
self._notebook.CloseCurrentPage()
HG.client_controller.CallLaterQtSafe( self, 0.5, 'test job', self._UnclosePage )
def qt_close_pages( page_of_pages ):
indices = list( range( page_of_pages.count() ) )
indices.reverse()
t = 0.0
for i in indices:
HG.client_controller.CallLaterQtSafe( self, t, 'test job', page_of_pages._ClosePage, i )
t += 0.25
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self._notebook.CloseCurrentPage )
t += 0.25
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.DeleteAllClosedPages )
def qt_test_ac():
default_location_context = HG.client_controller.services_manager.GetDefaultLocationContext()
SYS_PRED_REFRESH = 1.0
page = self._notebook.NewPageQuery( default_location_context, page_name = 'test', select_page = True )
t = 0.5
HG.client_controller.CallLaterQtSafe( self, t, 'test job', page.SetSearchFocus )
ac_widget = page.GetManagementPanel()._tag_autocomplete._text_ctrl
t += 0.5
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_SET_MEDIA_FOCUS ) )
t += 0.5
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self.ProcessApplicationCommand, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_SET_SEARCH_FOCUS ) )
t += 0.5
uias = QP.UIActionSimulator()
for c in 'the colour of her hair':
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, ord( c ), text = c )
t += 0.01
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += SYS_PRED_REFRESH
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += SYS_PRED_REFRESH
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Down )
t += 0.05
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += SYS_PRED_REFRESH
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Down )
t += 0.05
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += SYS_PRED_REFRESH
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
for i in range( 16 ):
t += SYS_PRED_REFRESH
for j in range( i + 1 ):
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Down )
t += 0.1
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += SYS_PRED_REFRESH
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, None, QC.Qt.Key_Return )
t += 1.0
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Down )
t += 0.05
HG.client_controller.CallLaterQtSafe( self, t, 'test job', uias.Char, ac_widget, QC.Qt.Key_Return )
t += 1.0
HG.client_controller.CallLaterQtSafe( self, t, 'test job', self._notebook.CloseCurrentPage )
def do_it():
# pages
page_of_pages = HG.client_controller.CallBlockingToQt( self, qt_open_pages )
time.sleep( 4 )
HG.client_controller.CallBlockingToQt( self, qt_close_unclose_one_page )
time.sleep( 1.5 )
HG.client_controller.CallBlockingToQt( self, qt_close_pages, page_of_pages )
time.sleep( 5 )
del page_of_pages
# a/c
HG.client_controller.CallBlockingToQt( self, qt_test_ac )
HG.client_controller.CallToThread( do_it )
def _RunServerTest( self ):
def do_it():
host = '127.0.0.1'
port = HC.DEFAULT_SERVER_ADMIN_PORT
if HydrusNetworking.LocalPortInUse( port ):
HydrusData.ShowText( 'The server appears to be already running. Either that, or something else is using port ' + str( HC.DEFAULT_SERVER_ADMIN_PORT ) + '.' )
return
else:
try:
HydrusData.ShowText( 'Starting server\u2026' )
db_param = '-d=' + self._controller.GetDBDir()
if HC.PLATFORM_WINDOWS:
server_frozen_path = os.path.join( HC.BASE_DIR, 'server.exe' )
else:
server_frozen_path = os.path.join( HC.BASE_DIR, 'server' )
if os.path.exists( server_frozen_path ):
cmd = [ server_frozen_path, db_param ]
else:
python_executable = sys.executable
if python_executable.endswith( 'client.exe' ) or python_executable.endswith( 'client' ):
raise Exception( 'Could not automatically set up the server--could not find server executable or python executable.' )
if 'pythonw' in python_executable:
python_executable = python_executable.replace( 'pythonw', 'python' )
server_script_path = os.path.join( HC.BASE_DIR, 'server.py' )
cmd = [ python_executable, server_script_path, db_param ]
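# illustrative shape of the command (actual paths depend on the install):
# [ '/usr/bin/python3', '<install dir>/server.py', '-d=<db dir>' ]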
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = False )
HydrusData.CheckProgramIsNotShuttingDown()
subprocess.Popen( cmd, **sbp_kwargs )
time_waited = 0
while not HydrusNetworking.LocalPortInUse( port ):
time.sleep( 3 )
time_waited += 3
if time_waited > 30:
raise Exception( 'The server\'s port did not appear!' )
except:
HydrusData.ShowText( 'I tried to start the server, but something failed!' + os.linesep + traceback.format_exc() )
return
time.sleep( 5 )
HydrusData.ShowText( 'Creating admin service\u2026' )
admin_service_key = HydrusData.GenerateKey()
service_type = HC.SERVER_ADMIN
name = 'local server admin'
admin_service = ClientServices.GenerateService( admin_service_key, service_type, name )
all_services = list( self._controller.services_manager.GetServices() )
all_services.append( admin_service )
self._controller.SetServices( all_services )
time.sleep( 1 )
admin_service = self._controller.services_manager.GetService( admin_service_key ) # let's refresh it
credentials = HydrusNetwork.Credentials( host, port )
admin_service.SetCredentials( credentials )
time.sleep( 1 )
response = admin_service.Request( HC.GET, 'access_key', { 'registration_key' : b'init' } )
access_key = response[ 'access_key' ]
credentials = HydrusNetwork.Credentials( host, port, access_key )
admin_service.SetCredentials( credentials )
#
HydrusData.ShowText( 'Admin service initialised.' )
QP.CallAfter( ClientGUIFrames.ShowKeys, 'access', (access_key,) )
#
time.sleep( 5 )
HydrusData.ShowText( 'Creating tag and file services\u2026' )
response = admin_service.Request( HC.GET, 'services' )
serverside_services = response[ 'services' ]
service_key = HydrusData.GenerateKey()
tag_service = HydrusNetwork.GenerateService( service_key, HC.TAG_REPOSITORY, 'tag service', HC.DEFAULT_SERVICE_PORT )
serverside_services.append( tag_service )
service_key = HydrusData.GenerateKey()
file_service = HydrusNetwork.GenerateService( service_key, HC.FILE_REPOSITORY, 'file service', HC.DEFAULT_SERVICE_PORT + 1 )
serverside_services.append( file_service )
response = admin_service.Request( HC.POST, 'services', { 'services' : serverside_services } )
service_keys_to_access_keys = response[ 'service_keys_to_access_keys' ]
deletee_service_keys = []
with HG.dirty_object_lock:
self._controller.WriteSynchronous( 'update_server_services', admin_service_key, serverside_services, service_keys_to_access_keys, deletee_service_keys )
self._controller.RefreshServices()
HydrusData.ShowText( 'Done! Check services->review services to see your new server and its services.' )
text = 'This will attempt to start the server in the same install directory as this client, initialise it, and store the resultant admin accounts in the client.'
result = ClientGUIDialogsQuick.GetYesNo( self, text )
if result == QW.QDialog.Accepted:
self._controller.CallToThread( do_it )
def _SaveSplitterPositions( self ):
page = self._notebook.GetCurrentMediaPage()
if page is not None:
( HC.options[ 'hpos' ], HC.options[ 'vpos' ] ) = page.GetSashPositions()
def _SetPassword( self ):
message = '''You can set a password to be asked for whenever the client starts.
Though not foolproof by any means, it will stop noobs from easily seeing your files if you leave your machine unattended.
Do not ever forget your password! If you do, you'll have to manually insert a yaml-dumped python dictionary into a sqlite database or run from edited source to regain easy access. This is not trivial.
The password is cleartext here but obscured in the entry dialog. Enter a blank password to remove.'''
with ClientGUIDialogs.DialogTextEntry( self, message, allow_blank = True, min_char_width = 24 ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
password = dlg.GetValue()
if password == '':
password = None
self._controller.Write( 'set_password', password )
def _SetMediaFocus( self ):
page = self._notebook.GetCurrentMediaPage()
if page is not None:
page.SetMediaFocus()
def _SetSearchFocus( self ):
page = self._notebook.GetCurrentMediaPage()
if page is not None:
page.SetSearchFocus()
def _SetupBackupPath( self ):
backup_intro = 'Everything in your client is stored in the database, which consists of a handful of .db files and a single subdirectory that contains all your media files. It is a very good idea to maintain a regular backup schedule--to save from hard drive failure, serious software fault, accidental deletion, or any other unexpected problem. It sucks to lose all your work, so make sure it can\'t happen!'
backup_intro += os.linesep * 2
backup_intro += 'If you prefer to create a manual backup with an external program like FreeFileSync, then please cancel out of the dialog after this and set up whatever you like, but if you would rather a simple solution, simply select a directory and the client will remember it as the designated backup location. Creating or updating your backup can be triggered at any time from the database menu.'
backup_intro += os.linesep * 2
backup_intro += 'An ideal backup location is initially empty and on a different hard drive.'
backup_intro += os.linesep * 2
backup_intro += 'If you have a large database (100,000+ files) or a slow hard drive, creating the initial backup may take a long time--perhaps an hour or more--but updating an existing backup should only take a couple of minutes (since the client only has to copy new or modified files). Try to update your backup every week!'
backup_intro += os.linesep * 2
backup_intro += 'If you would like some more info on making or restoring backups, please consult the help\'s \'installing and updating\' page.'
QW.QMessageBox.information( self, 'Information', backup_intro )
with QP.DirDialog( self, 'Select backup location.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
path = dlg.GetPath()
if path == '':
path = None
if path == self._controller.GetDBDir():
QW.QMessageBox.critical( self, 'Error', 'That directory is your current database directory! You cannot backup to the same location you are backing up from!' )
return
if os.path.exists( path ):
filenames = os.listdir( path )
num_files = len( filenames )
if num_files == 0:
extra_info = 'It looks currently empty, which is great--there is no danger of anything being overwritten.'
elif 'client.db' in filenames:
extra_info = 'It looks like a client database already exists in the location--be certain that it is ok to overwrite it.'
else:
extra_info = 'It seems to have some files already in it--be careful and make sure you chose the correct location.'
else:
extra_info = 'The path does not exist yet--it will be created when you make your first backup.'
text = 'You chose "' + path + '". Here is what I understand about it:'
text += os.linesep * 2
text += extra_info
text += os.linesep * 2
text += 'Are you sure this is the correct directory?'
result = ClientGUIDialogsQuick.GetYesNo( self, text )
if result == QW.QDialog.Accepted:
self._new_options.SetNoneableString( 'backup_path', path )
text = 'Would you like to create your backup now?'
result = ClientGUIDialogsQuick.GetYesNo( self, text )
if result == QW.QDialog.Accepted:
self._BackupDatabase()
def _ShowHideSplitters( self ):
page = self._notebook.GetCurrentMediaPage()
if page is not None:
page.ShowHideSplit()
def _ShowPageWeightInfo( self ):
(
total_active_page_count,
total_active_num_hashes,
total_active_num_seeds,
total_closed_page_count,
total_closed_num_hashes,
total_closed_num_seeds
) = self.GetTotalPageCounts()
total_active_num_hashes_weight = ClientGUIPages.ConvertNumHashesToWeight( total_active_num_hashes )
total_active_num_seeds_weight = ClientGUIPages.ConvertNumSeedsToWeight( total_active_num_seeds )
total_closed_num_hashes_weight = ClientGUIPages.ConvertNumHashesToWeight( total_closed_num_hashes )
total_closed_num_seeds_weight = ClientGUIPages.ConvertNumSeedsToWeight( total_closed_num_seeds )
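# per the user-facing text below, a file contributes weight 1 and a URL weight 20, so a session with
# 100,000 files and 10,000 URLs would come to 100,000 + 200,000 = 300,000 weight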
message = 'Session weight is a simple representation of your pages\' combined memory and CPU load. A file counts as 1, and a URL counts as 20.'
message += os.linesep * 2
message += 'Try to keep the total below 10 million! It is also generally better to spread it around--have five download pages each of 500k weight rather than one page with 2.5M.'
message += os.linesep * 2
message += 'Your {} open pages\' total is: {}'.format( total_active_page_count, HydrusData.ToHumanInt( total_active_num_hashes_weight + total_active_num_seeds_weight ) )
message += os.linesep * 2
message += 'Specifically, your file weight is {} and URL weight is {}.'.format( HydrusData.ToHumanInt( total_active_num_hashes_weight ), HydrusData.ToHumanInt( total_active_num_seeds_weight ) )
message += os.linesep * 2
message += 'For extra info, your {} closed pages (in the undo list) have total weight {}, being file weight {} and URL weight {}.'.format(
total_closed_page_count,
HydrusData.ToHumanInt( total_closed_num_hashes_weight + total_closed_num_seeds_weight ),
HydrusData.ToHumanInt( total_closed_num_hashes_weight ),
HydrusData.ToHumanInt( total_closed_num_seeds_weight )
)
QW.QMessageBox.information( self, 'Information', message )
def _StartIPFSDownload( self ):
ipfs_services = self._controller.services_manager.GetServices( ( HC.IPFS, ), randomised = True )
if len( ipfs_services ) > 0:
if len( ipfs_services ) == 1:
( service, ) = ipfs_services
else:
choice_tuples = [ ( service.GetName(), service ) for service in ipfs_services ]
try:
service = ClientGUIDialogsQuick.SelectFromList( self, 'Select which IPFS Daemon', choice_tuples )
except HydrusExceptions.CancelledException:
return
with ClientGUIDialogs.DialogTextEntry( self, 'Enter multihash to download.' ) as dlg:
result = dlg.exec()
if result == QW.QDialog.Accepted:
multihash = dlg.GetValue()
service.ImportFile( multihash )
def _SwitchBoolean( self, name ):
if name == 'cache_report_mode':
HG.cache_report_mode = not HG.cache_report_mode
elif name == 'callto_report_mode':
HG.callto_report_mode = not HG.callto_report_mode
elif name == 'canvas_tile_outline_mode':
HG.canvas_tile_outline_mode = not HG.canvas_tile_outline_mode
elif name == 'daemon_report_mode':
HG.daemon_report_mode = not HG.daemon_report_mode
elif name == 'db_report_mode':
HG.db_report_mode = not HG.db_report_mode
elif name == 'db_ui_hang_relief_mode':
HG.db_ui_hang_relief_mode = not HG.db_ui_hang_relief_mode
elif name == 'file_import_report_mode':
HG.file_import_report_mode = not HG.file_import_report_mode
elif name == 'file_report_mode':
HG.file_report_mode = not HG.file_report_mode
elif name == 'gui_report_mode':
HG.gui_report_mode = not HG.gui_report_mode
elif name == 'hover_window_report_mode':
HG.hover_window_report_mode = not HG.hover_window_report_mode
elif name == 'media_load_report_mode':
HG.media_load_report_mode = not HG.media_load_report_mode
elif name == 'mpv_report_mode':
HG.mpv_report_mode = not HG.mpv_report_mode
level = 'debug' if HG.mpv_report_mode else 'fatal'
self._controller.pub( 'set_mpv_log_level', level )
elif name == 'network_report_mode':
HG.network_report_mode = not HG.network_report_mode
elif name == 'phash_generation_report_mode':
HG.phash_generation_report_mode = not HG.phash_generation_report_mode
elif name == 'pubsub_report_mode':
HG.pubsub_report_mode = not HG.pubsub_report_mode
elif name == 'shortcut_report_mode':
HG.shortcut_report_mode = not HG.shortcut_report_mode
elif name == 'subprocess_report_mode':
HG.subprocess_report_mode = not HG.subprocess_report_mode
elif name == 'subscription_report_mode':
HG.subscription_report_mode = not HG.subscription_report_mode
elif name == 'thumbnail_debug_mode':
HG.thumbnail_debug_mode = not HG.thumbnail_debug_mode
elif name == 'force_idle_mode':
HG.force_idle_mode = not HG.force_idle_mode
self._controller.pub( 'wake_idle_workers' )
self.SetStatusBarDirty()
elif name == 'no_page_limit_mode':
HG.no_page_limit_mode = not HG.no_page_limit_mode
def _TestServerBusy( self, service_key ):
def do_it( service ):
result_bytes = service.Request( HC.GET, 'busy' )
if result_bytes == b'1':
HydrusData.ShowText( 'server is busy' )
elif result_bytes == b'0':
HydrusData.ShowText( 'server is not busy' )
else:
HydrusData.ShowText( 'server responded in a way I do not understand' )
service = self._controller.services_manager.GetService( service_key )
self._controller.CallToThread( do_it, service )
def _UnclosePage( self, closed_page_index = None ):
if closed_page_index is None:
if len( self._closed_pages ) == 0:
return
closed_page_index = len( self._closed_pages ) - 1
( time_closed, page ) = self._closed_pages.pop( closed_page_index )
self._controller.UnclosePageKeys( page.GetPageKeys() )
self._controller.pub( 'notify_page_unclosed', page )
self._menu_updater_undo.update()
self._controller.pub( 'notify_new_pages' )
def _UploadPending( self, service_key ):
service = self._controller.services_manager.GetService( service_key )
try:
if isinstance( service, ClientServices.ServiceRestricted ):
service.CheckFunctional( including_bandwidth = False )
else:
service.CheckFunctional()
if isinstance( service, ClientServices.ServiceRepository ):
if not service.IsMostlyCaughtUp():
raise Exception( 'Repository processing is not caught up--please process more before you upload new content.' )
except Exception as e:
QW.QMessageBox.critical( self, 'Error', 'Unfortunately, there is a problem with starting the upload: ' + str( e ) )
return
self._currently_uploading_pending.add( service_key )
self._menu_updater_pending.update()
self._controller.CallToThread( THREADUploadPending, service_key )
def _UpdateSystemTrayIcon( self, currently_booting = False ):
if not ClientGUISystemTray.SystemTrayAvailable() or ( not HC.PLATFORM_WINDOWS and not HG.client_controller.new_options.GetBoolean( 'advanced_mode' ) ):
return
new_options = self._controller.new_options
always_show_system_tray_icon = new_options.GetBoolean( 'always_show_system_tray_icon' )
need_system_tray = always_show_system_tray_icon
if self._currently_minimised_to_system_tray:
need_system_tray = True
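# the icon is wanted either because the user always wants it shown, or because the UI is currently
# hidden to the tray and the icon is needed to restore it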
if need_system_tray:
if not self._have_system_tray_icon:
self._system_tray_icon = ClientGUISystemTray.ClientSystemTrayIcon( self )
self._system_tray_icon.highlight.connect( self.RestoreOrActivateWindow )
self._system_tray_icon.flip_show_ui.connect( self._FlipShowHideWholeUI )
self._system_tray_icon.exit_client.connect( self.TryToExit )
self._system_tray_icon.flip_pause_network_jobs.connect( self.FlipNetworkTrafficPaused )
self._system_tray_icon.flip_pause_subscription_jobs.connect( self.FlipSubscriptionsPaused )
self._have_system_tray_icon = True
self._system_tray_icon.show()
self._system_tray_icon.SetShouldAlwaysShow( always_show_system_tray_icon )
self._system_tray_icon.SetUIIsCurrentlyShown( not self._currently_minimised_to_system_tray )
self._system_tray_icon.SetNetworkTrafficPaused( new_options.GetBoolean( 'pause_all_new_network_traffic' ) )
self._system_tray_icon.SetSubscriptionsPaused( HC.options[ 'pause_subs_sync' ] )
else:
if self._have_system_tray_icon:
self._system_tray_icon.deleteLater()
self._system_tray_icon = None
self._have_system_tray_icon = False
def _VacuumDatabase( self ):
text = 'This will rebuild the database, rewriting all indices and tables to be contiguous and optimising most operations. It also truncates the database files, recovering unused space back to your hard drive. It typically happens automatically every few months, but you can force it here.'
text += os.linesep * 2
text += 'If you have no reason to run this, it is usually pointless. If you have a very large database on an HDD instead of an SSD, it may take upwards of an hour, during which your gui may hang. A popup message will show its status.'
text += os.linesep * 2
text += 'A \'soft\' vacuum will only reanalyze those databases that are due for a check in the normal db maintenance cycle. If nothing is due, it will return immediately.'
text += os.linesep * 2
text += 'A \'full\' vacuum will immediately force a vacuum for the entire database. This can take substantially longer.'
( result, was_cancelled ) = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Choose how thorough your vacuum will be.', yes_label = 'soft', no_label = 'full', check_for_cancelled = True )
if was_cancelled:
return
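# note the mapping that follows: 'yes' (Accepted) runs the soft vacuum, 'no' (Rejected) runs the full
# vacuum with force_vacuum = True, matching the yes/no labels set on the dialog above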
if result == QW.QDialog.Accepted:
self._controller.Write( 'vacuum', maintenance_mode = HC.MAINTENANCE_FORCED )
elif result == QW.QDialog.Rejected:
self._controller.Write( 'vacuum', maintenance_mode = HC.MAINTENANCE_FORCED, force_vacuum = True )
def _VacuumServer( self, service_key ):
def do_it( service ):
started = HydrusData.GetNow()
service.Request( HC.POST, 'vacuum' )
HydrusData.ShowText( 'Server vacuum started!' )
time.sleep( 10 )
result_bytes = service.Request( HC.GET, 'busy' )
while result_bytes == b'1':
if HG.view_shutdown:
return
time.sleep( 10 )
result_bytes = service.Request( HC.GET, 'busy' )
it_took = HydrusData.GetNow() - started
HydrusData.ShowText( 'Server vacuum done in ' + HydrusData.TimeDeltaToPrettyTimeDelta( it_took ) + '!' )
message = 'This will tell the server to lock and vacuum its database files. It may take some time to complete, during which time it will not be able to serve any requests.'
result = ClientGUIDialogsQuick.GetYesNo( self, message, yes_label = 'do it', no_label = 'forget it' )
if result == QW.QDialog.Accepted:
service = self._controller.services_manager.GetService( service_key )
self._controller.CallToThread( do_it, service )
def AddModalMessage( self, job_key: ClientThreading.JobKey ):
if job_key.IsCancelled() or job_key.IsDeleted():
return
if job_key.IsDone():
self._controller.pub( 'message', job_key )
return
dialog_is_open = ClientGUIFunctions.DialogIsOpen()
if self._CurrentlyMinimisedOrHidden() or dialog_is_open or not ClientGUIFunctions.TLWOrChildIsActive( self ):
self._pending_modal_job_keys.add( job_key )
else:
HG.client_controller.pub( 'pause_all_media' )
title = job_key.GetStatusTitle()
if title is None:
title = 'important job'
hide_close_button = not job_key.IsCancellable()
with ClientGUITopLevelWindowsPanels.DialogNullipotent( self, title, hide_buttons = hide_close_button, do_not_activate = True ) as dlg:
panel = ClientGUIPopupMessages.PopupMessageDialogPanel( dlg, job_key )
dlg.SetPanel( panel )
dlg.exec()
def AskToDeleteAllClosedPages( self ):
message = 'Clear the {} closed pages?'.format( HydrusData.ToHumanInt( len( self._closed_pages ) ) )
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result == QW.QDialog.Accepted:
self.DeleteAllClosedPages()
def AutoSaveLastSession( self ):
only_save_last_session_during_idle = self._controller.new_options.GetBoolean( 'only_save_last_session_during_idle' )
if only_save_last_session_during_idle and not self._controller.CurrentlyIdle():
self._controller.CallLaterQtSafe( self, 60, 'auto session save wait loop', self.AutoSaveLastSession )
else:
if HC.options[ 'default_gui_session' ] == CC.LAST_SESSION_SESSION_NAME:
only_changed_page_data = True
about_to_save = True
session = self._notebook.GetCurrentGUISession( CC.LAST_SESSION_SESSION_NAME, only_changed_page_data, about_to_save )
session = self._FleshOutSessionWithCleanDataIfNeeded( self._notebook, CC.LAST_SESSION_SESSION_NAME, session )
callable = self.AutoSaveLastSession
last_session_save_period_minutes = self._controller.new_options.GetInteger( 'last_session_save_period_minutes' )
next_call_delay = last_session_save_period_minutes * 60
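# the actual save happens on a worker thread; once it finishes, this same method is scheduled again
# after the configured period, so this forms a self-perpetuating auto-save loop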
def do_it( controller, session, win, next_call_delay, callable ):
controller.SaveGUISession( session )
controller.CallLaterQtSafe( win, next_call_delay, 'auto save session', callable )
self._controller.CallToThread( do_it, self._controller, session, self, next_call_delay, callable )
def closeEvent( self, event ):
if self._controller.new_options.GetBoolean( 'close_client_to_system_tray' ):
self._FlipShowHideWholeUI()
return
self.TryToExit()
event.ignore() # we always ignore, as we'll close through the window through other means
def CreateNewSubscriptionGapDownloader( self, gug_key_and_name, query_text, file_import_options, tag_import_options, file_limit ):
page = self._notebook.GetOrMakeGalleryDownloaderPage( desired_page_name = 'subscription gap downloaders', select_page = True )
if page is None:
HydrusData.ShowText( 'Sorry, could not create the downloader page! Is your session super full atm?' )
management_controller = page.GetManagementController()
multiple_gallery_import = management_controller.GetVariable( 'multiple_gallery_import' )
multiple_gallery_import.PendSubscriptionGapDownloader( gug_key_and_name, query_text, file_import_options, tag_import_options, file_limit )
self._notebook.ShowPage( page )
def DeleteAllClosedPages( self ):
deletee_pages = [ page for ( time_closed, page ) in self._closed_pages ]
self._closed_pages = []
if len( deletee_pages ) > 0:
self._DestroyPages( deletee_pages )
self._menu_updater_undo.update()
def DeleteOldClosedPages( self ):
new_closed_pages = []
now = HydrusData.GetNow()
timeout = 60 * 60
deletee_pages = []
old_closed_pages = self._closed_pages
self._closed_pages = []
for ( time_closed, page ) in old_closed_pages:
if time_closed + timeout < now:
deletee_pages.append( page )
else:
self._closed_pages.append( ( time_closed, page ) )
if len( old_closed_pages ) != len( self._closed_pages ):
self._menu_updater_undo.update()
self._DestroyPages( deletee_pages )
def DoFileStorageRebalance( self, job_key: ClientThreading.JobKey ):
self._controller.CallToThread( self._controller.client_files_manager.Rebalance, job_key )
job_key.SetStatusTitle( 'rebalancing files' )
with ClientGUITopLevelWindowsPanels.DialogNullipotent( None, 'migrating files' ) as dlg:
panel = ClientGUIPopupMessages.PopupMessageDialogPanel( dlg, job_key, hide_main_gui = True )
dlg.SetPanel( panel )
dlg.exec()
self._MigrateDatabase()
def EventIconize( self, event: QG.QWindowStateChangeEvent ):
if self.isMinimized():
self._was_maximised = event.oldState() & QC.Qt.WindowMaximized
if not self._currently_minimised_to_system_tray and self._controller.new_options.GetBoolean( 'minimise_client_to_system_tray' ):
self._FlipShowHideWholeUI()
def EventMove( self, event ):
if HydrusData.TimeHasPassedFloat( self._last_move_pub + 0.1 ):
self._controller.pub( 'top_level_window_move_event' )
self._last_move_pub = HydrusData.GetNowPrecise()
return True # was: event.ignore()
def TIMEREventAnimationUpdate( self ):
if self._currently_minimised_to_system_tray:
return
try:
windows = list( self._animation_update_windows )
for window in windows:
if not QP.isValid( window ):
self._animation_update_windows.discard( window )
continue
tlw = window.window()
if not tlw or not QP.isValid( tlw ):
self._animation_update_windows.discard( window )
continue
if self._currently_minimised_to_system_tray:
continue
try:
if HG.profile_mode:
summary = 'Profiling animation timer: ' + repr( window )
HydrusData.Profile( summary, 'window.TIMERAnimationUpdate()', globals(), locals(), min_duration_ms = HG.ui_timer_profile_min_job_time_ms )
else:
window.TIMERAnimationUpdate()
except Exception:
self._animation_update_windows.discard( window )
except:
# obsolete comment below, leaving it just in case
#
# unusual error catch here, just to experiment. user was getting wxAssertionError on m_window failed, no GetSize() without window
# I figured at the time that this is some window manager being unhappy with doing animation on a hidden window,
# but it could also be a half-dead window trying to draw to a dead bmp or something, and then getting stuck somehow
# traceback was on the for loop list iteration line,
# which I think was just the C++/wxAssertionError having trouble making the right trace wew
self._animation_update_windows = set()
windows = []
if len( self._animation_update_windows ) == 0:
self._animation_update_timer.stop()
def FlipDarkmode( self ):
current_colourset = self._new_options.GetString( 'current_colourset' )
if current_colourset == 'darkmode':
new_colourset = 'default'
elif current_colourset == 'default':
new_colourset = 'darkmode'
self._new_options.SetString( 'current_colourset', new_colourset )
HG.client_controller.pub( 'notify_new_colourset' )
def FlipNetworkTrafficPaused( self ):
self._controller.network_engine.PausePlayNewJobs()
self._UpdateSystemTrayIcon()
self._menu_updater_network.update()
def FlipSubscriptionsPaused( self ):
HC.options[ 'pause_subs_sync' ] = not HC.options[ 'pause_subs_sync' ]
self._controller.subscriptions_manager.Wake()
self._controller.Write( 'save_options', HC.options )
self._UpdateSystemTrayIcon()
self._menu_updater_network.update()
def GetCurrentPage( self ):
return self._notebook.GetCurrentMediaPage()
def GetCurrentSessionPageAPIInfoDict( self ):
return self._notebook.GetSessionAPIInfoDict( is_selected = True )
def GetMPVWidget( self, parent ):
if len( self._persistent_mpv_widgets ) == 0:
mpv_widget = ClientGUIMPV.mpvWidget( parent )
self._persistent_mpv_widgets.append( mpv_widget )
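# if the pool was empty, a fresh widget was just created and appended, so the pop below always has
# something to return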
mpv_widget = self._persistent_mpv_widgets.pop()
if mpv_widget.parentWidget() is self:
mpv_widget.setParent( parent )
return mpv_widget
def GetPageFromPageKey( self, page_key ):
return self._notebook.GetPageFromPageKey( page_key )
def GetPageAPIInfoDict( self, page_key, simple ):
page = self._notebook.GetPageFromPageKey( page_key )
if page is None:
return None
else:
return page.GetAPIInfoDict( simple )
def GetTotalPageCounts( self ):
total_active_page_count = self._notebook.GetNumPages()
total_closed_page_count = len( self._closed_pages )
( total_active_num_hashes, total_active_num_seeds ) = self._notebook.GetTotalNumHashesAndSeeds()
total_closed_num_hashes = 0
total_closed_num_seeds = 0
for ( time_closed, page ) in self._closed_pages:
( num_hashes, num_seeds ) = page.GetTotalNumHashesAndSeeds()
total_closed_num_hashes += num_hashes
total_closed_num_seeds += num_seeds
return (
total_active_page_count,
total_active_num_hashes,
total_active_num_seeds,
total_closed_page_count,
total_closed_num_hashes,
total_closed_num_seeds
)
def HideToSystemTray( self ):
shown = not self._currently_minimised_to_system_tray
windows_or_advanced_mode = HC.PLATFORM_WINDOWS or HG.client_controller.new_options.GetBoolean( 'advanced_mode' )
good_to_go = ClientGUISystemTray.SystemTrayAvailable() and windows_or_advanced_mode
if shown and good_to_go:
self._FlipShowHideWholeUI()
def IShouldRegularlyUpdate( self, window ):
current_page = self.GetCurrentPage()
if current_page is not None:
in_current_page = ClientGUIFunctions.IsQtAncestor( window, current_page )
if in_current_page:
return True
in_other_window = window.window() != self
return in_other_window
def ImportFiles( self, paths ):
# can more easily do this when file_seeds are doing their own tags
# get current media page
# if it is an import page, ask user if they want to add it to the page or make a new one
# if using existing, then load the panel without file import options
# think about how to merge 'delete_after_success' or not--maybe this can be handled by file_seeds as well
self._ImportFiles( paths )
def ImportURLFromAPI( self, url, filterable_tags, additional_service_keys_to_tags, destination_page_name, destination_page_key, show_destination_page ):
try:
( normalised_url, result_text ) = self._ImportURL( url, filterable_tags = filterable_tags, additional_service_keys_to_tags = additional_service_keys_to_tags, destination_page_name = destination_page_name, destination_page_key = destination_page_key, show_destination_page = show_destination_page )
return ( normalised_url, result_text )
except ( HydrusExceptions.URLClassException, HydrusExceptions.NetworkException ):
raise
except HydrusExceptions.DataMissing as e:
raise HydrusExceptions.BadRequestException( str( e ) )
except Exception as e:
HydrusData.PrintException( e )
raise HydrusExceptions.ServerException( str( e ) )
def ImportURLFromDragAndDrop( self, url ):
try:
self._ImportURL( url )
except Exception as e:
HydrusData.ShowException( e )
def ImportURL( self, url, destination_page_name ):
try:
self._ImportURL( url, destination_page_name = destination_page_name, show_destination_page = False )
except Exception as e:
HydrusData.ShowException( e )
def IsCurrentPage( self, page_key ):
result = self._notebook.GetCurrentMediaPage()
if result is None:
return False
else:
return page_key == result.GetPageKey()
def MaintainCanvasFrameReferences( self ):
self._canvas_frames = [ frame for frame in self._canvas_frames if QP.isValid( frame ) ]
def NewPageImportHDD( self, paths, file_import_options, paths_to_additional_service_keys_to_tags, delete_after_success ):
management_controller = ClientGUIManagement.CreateManagementControllerImportHDD( paths, file_import_options, paths_to_additional_service_keys_to_tags, delete_after_success )
self._notebook.NewPage( management_controller, on_deepest_notebook = True )
def NewPageQuery( self, location_context: ClientLocation.LocationContext, initial_hashes = None, initial_predicates = None, page_name = None, do_sort = False, select_page = True, activate_window = False ):
if initial_hashes is None:
initial_hashes = []
if initial_predicates is None:
initial_predicates = []
self._notebook.NewPageQuery( location_context, initial_hashes = initial_hashes, initial_predicates = initial_predicates, page_name = page_name, on_deepest_notebook = True, do_sort = do_sort, select_page = select_page )
if activate_window and not self.isActiveWindow():
self.activateWindow()
def NotifyAdvancedMode( self ):
self._menu_updater_network.update()
self._menu_updater_file.update()
def NotifyClosedPage( self, page ):
if self._clipboard_watcher_destination_page_urls == page:
self._clipboard_watcher_destination_page_urls = None
if self._clipboard_watcher_destination_page_watcher == page:
self._clipboard_watcher_destination_page_watcher = None
close_time = HydrusData.GetNow()
self._closed_pages.append( ( close_time, page ) )
self._controller.ClosePageKeys( page.GetPageKeys() )
self._menu_updater_pages.update()
self._menu_updater_undo.update()
def NotifyDeletedPage( self, page ):
self._DestroyPages( ( page, ) )
self._menu_updater_pages.update()
def NotifyNewExportFolders( self ):
self._menu_updater_file.update()
def NotifyNewImportFolders( self ):
self._menu_updater_file.update()
def NotifyNewOptions( self ):
self._menu_updater_database.update()
self._menu_updater_services.update()
def NotifyNewPages( self ):
self._menu_updater_pages.update()
def NotifyNewPending( self ):
self._menu_updater_pending.update()
def NotifyNewPermissions( self ):
self._menu_updater_pages.update()
self._menu_updater_services.update()
def NotifyNewServices( self ):
self._menu_updater_pages.update()
self._menu_updater_services.update()
def NotifyNewSessions( self ):
self._menu_updater_pages.update()
def NotifyNewUndo( self ):
self._menu_updater_undo.update()
def NotifyPendingUploadFinished( self, service_key: bytes ):
self._currently_uploading_pending.discard( service_key )
self._menu_updater_pending.update()
def PresentImportedFilesToPage( self, hashes, page_name ):
self._notebook.PresentImportedFilesToPage( hashes, page_name )
def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
command_processed = True
if command.IsSimpleCommand():
action = command.GetSimpleAction()
if action == CAC.SIMPLE_EXIT_APPLICATION:
self.TryToExit()
elif action == CAC.SIMPLE_EXIT_APPLICATION_FORCE_MAINTENANCE:
self.TryToExit( force_shutdown_maintenance = True )
elif action == CAC.SIMPLE_RESTART_APPLICATION:
self.TryToExit( restart = True )
elif action == CAC.SIMPLE_HIDE_TO_SYSTEM_TRAY:
self.HideToSystemTray()
elif action == CAC.SIMPLE_MOVE_PAGES_SELECTION_LEFT:
self._notebook.MoveSelection( -1 )
elif action == CAC.SIMPLE_MOVE_PAGES_SELECTION_RIGHT:
self._notebook.MoveSelection( 1 )
elif action == CAC.SIMPLE_MOVE_PAGES_SELECTION_HOME:
self._notebook.MoveSelectionEnd( -1 )
elif action == CAC.SIMPLE_MOVE_PAGES_SELECTION_END:
self._notebook.MoveSelectionEnd( 1 )
elif action == CAC.SIMPLE_REFRESH:
self._Refresh()
elif action == CAC.SIMPLE_REFRESH_ALL_PAGES:
self._notebook.RefreshAllPages()
elif action == CAC.SIMPLE_REFRESH_PAGE_OF_PAGES_PAGES:
page = self._notebook.GetCurrentMediaPage()
if page is not None:
parent = page.GetParentNotebook()
parent.RefreshAllPages()
elif action == CAC.SIMPLE_NEW_PAGE:
self._notebook.ChooseNewPageForDeepestNotebook()
elif action == CAC.SIMPLE_NEW_PAGE_OF_PAGES:
self._notebook.NewPagesNotebook( on_deepest_notebook = True )
elif action == CAC.SIMPLE_NEW_DUPLICATE_FILTER_PAGE:
self._notebook.NewPageDuplicateFilter( on_deepest_notebook = True )
elif action == CAC.SIMPLE_NEW_GALLERY_DOWNLOADER_PAGE:
self._notebook.NewPageImportGallery( on_deepest_notebook = True )
elif action == CAC.SIMPLE_NEW_SIMPLE_DOWNLOADER_PAGE:
self._notebook.NewPageImportSimpleDownloader( on_deepest_notebook = True )
elif action == CAC.SIMPLE_NEW_URL_DOWNLOADER_PAGE:
self._notebook.NewPageImportURLs( on_deepest_notebook = True )
elif action == CAC.SIMPLE_NEW_WATCHER_DOWNLOADER_PAGE:
self._notebook.NewPageImportMultipleWatcher( on_deepest_notebook = True )
elif action == CAC.SIMPLE_CLOSE_PAGE:
self._notebook.CloseCurrentPage()
elif action == CAC.SIMPLE_UNCLOSE_PAGE:
self._UnclosePage()
elif action == CAC.SIMPLE_RUN_ALL_EXPORT_FOLDERS:
self._RunExportFolder()
elif action == CAC.SIMPLE_CHECK_ALL_IMPORT_FOLDERS:
self._CheckImportFolder()
elif action == CAC.SIMPLE_FLIP_DARKMODE:
self.FlipDarkmode()
elif action == CAC.SIMPLE_GLOBAL_AUDIO_MUTE:
ClientGUIMediaControls.SetMute( ClientGUIMediaControls.AUDIO_GLOBAL, True )
elif action == CAC.SIMPLE_GLOBAL_AUDIO_UNMUTE:
ClientGUIMediaControls.SetMute( ClientGUIMediaControls.AUDIO_GLOBAL, False )
elif action == CAC.SIMPLE_GLOBAL_AUDIO_MUTE_FLIP:
ClientGUIMediaControls.FlipMute( ClientGUIMediaControls.AUDIO_GLOBAL )
elif action == CAC.SIMPLE_GLOBAL_PROFILE_MODE_FLIP:
HG.client_controller.FlipProfileMode()
elif action == CAC.SIMPLE_GLOBAL_FORCE_ANIMATION_SCANBAR_SHOW:
HG.client_controller.new_options.FlipBoolean( 'force_animation_scanbar_show' )
elif action == CAC.SIMPLE_SHOW_HIDE_SPLITTERS:
self._ShowHideSplitters()
elif action == CAC.SIMPLE_SET_MEDIA_FOCUS:
self._SetMediaFocus()
elif action == CAC.SIMPLE_SET_SEARCH_FOCUS:
self._SetSearchFocus()
elif action == CAC.SIMPLE_REDO:
self._controller.pub( 'redo' )
elif action == CAC.SIMPLE_UNDO:
self._controller.pub( 'undo' )
elif action == CAC.SIMPLE_FLIP_DEBUG_FORCE_IDLE_MODE_DO_NOT_SET_THIS:
self._SwitchBoolean( 'force_idle_mode' )
else:
command_processed = False
else:
command_processed = False
return command_processed
def ProposeSaveGUISession( self, name = None, suggested_name = '', notebook = None ):
if notebook is None:
notebook = self._notebook
if name is None:
while True:
with ClientGUIDialogs.DialogTextEntry( self, 'Enter a name for the new session.', default = suggested_name ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
name = dlg.GetValue()
if name in ClientGUISession.RESERVED_SESSION_NAMES:
QW.QMessageBox.critical( self, 'Error', 'Sorry, you cannot have that name! Try another.' )
else:
existing_session_names = self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER )
if name in existing_session_names:
message = 'Session "{}" already exists! Do you want to overwrite it?'.format( name )
( result, closed_by_user ) = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Overwrite existing session?', yes_label = 'yes, overwrite', no_label = 'no, choose another name', check_for_cancelled = True )
if closed_by_user:
return
elif result == QW.QDialog.Rejected:
continue
break
else:
return
elif name not in ClientGUISession.RESERVED_SESSION_NAMES: # i.e. a human asked to do this
message = 'Overwrite this session?'
result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Overwrite existing session?', yes_label = 'yes, overwrite', no_label = 'no' )
if result != QW.QDialog.Accepted:
return
#
only_changed_page_data = True
about_to_save = True
session = notebook.GetCurrentGUISession( name, only_changed_page_data, about_to_save )
self._FleshOutSessionWithCleanDataIfNeeded( notebook, name, session )
self._controller.CallToThread( self._controller.SaveGUISession, session )
def RefreshStatusBar( self ):
self._RefreshStatusBar()
def RegisterAnimationUpdateWindow( self, window ):
self._animation_update_windows.add( window )
if self._animation_update_timer is not None and not self._animation_update_timer.isActive():
self._animation_update_timer.setInterval( 5 )
self._animation_update_timer.start()
def RegisterCanvasFrameReference( self, frame ):
self._canvas_frames = [ fr for fr in self._canvas_frames if QP.isValid( fr ) ]
self._canvas_frames.append( frame )
def RegisterUIUpdateWindow( self, window ):
self._ui_update_windows.add( window )
if self._ui_update_repeating_job is None:
self._ui_update_repeating_job = self._controller.CallRepeatingQtSafe( self, 0.0, 0.1, 'repeating ui update', self.REPEATINGUIUpdate )
def ReleaseMPVWidget( self, mpv_widget ):
mpv_widget.setParent( self )
self._persistent_mpv_widgets.append( mpv_widget )
def REPEATINGBandwidth( self ):
global_tracker = self._controller.network_engine.bandwidth_manager.GetMySessionTracker()
boot_time = self._controller.GetBootTime()
time_since_boot = max( 1, HydrusData.GetNow() - boot_time )
usage_since_boot = global_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, time_since_boot )
bandwidth_status = HydrusData.ToHumanBytes( usage_since_boot )
current_usage = global_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1, for_user = True )
if current_usage > 0:
bandwidth_status += ' (' + HydrusData.ToHumanBytes( current_usage ) + '/s)'
if HC.options[ 'pause_subs_sync' ]:
bandwidth_status += ', subs paused'
if self._controller.new_options.GetBoolean( 'pause_all_new_network_traffic' ):
bandwidth_status += ', network paused'
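# illustrative final string (exact formatting comes from ToHumanBytes, so it may differ):
# '1.24 GB (350 KB/s), subs paused, network paused'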
tooltip = 'total bandwidth used this session, and current use'
self._statusbar.SetStatusText( bandwidth_status, 1, tooltip = tooltip )
def REPEATINGClipboardWatcher( self ):
allow_watchers = self._controller.new_options.GetBoolean( 'watch_clipboard_for_watcher_urls' )
allow_other_recognised_urls = self._controller.new_options.GetBoolean( 'watch_clipboard_for_other_recognised_urls' )
if not ( allow_watchers or allow_other_recognised_urls ):
self._BootOrStopClipboardWatcherIfNeeded()
return
try:
text = HG.client_controller.GetClipboardText()
except HydrusExceptions.DataMissing:
text = ''
except Exception as e:
HydrusData.ShowText( 'Could not access the clipboard: {}'.format( e ) )
self._clipboard_watcher_repeating_job.Cancel()
self._clipboard_watcher_repeating_job = None
return
if text != self._last_clipboard_watched_text:
self._last_clipboard_watched_text = text
for possible_url in HydrusText.DeserialiseNewlinedTexts( text ):
if not possible_url.startswith( 'http' ):
continue
try:
self._ImportURL( possible_url, show_destination_page = False, allow_watchers = allow_watchers, allow_other_recognised_urls = allow_other_recognised_urls, allow_unrecognised_urls = False )
except HydrusExceptions.URLClassException:
pass
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'Could not find a new page to place the clipboard URL. Perhaps the client is at its page limit.' )
break
def REPEATINGPageUpdate( self ):
page = self.GetCurrentPage()
if page is not None:
if HG.profile_mode:
summary = 'Profiling page timer: ' + repr( page )
HydrusData.Profile( summary, 'page.REPEATINGPageUpdate()', globals(), locals(), min_duration_ms = HG.ui_timer_profile_min_job_time_ms )
else:
page.REPEATINGPageUpdate()
if len( self._pending_modal_job_keys ) > 0:
# another safety thing. normally modal lads are shown immediately, no problem, but sometimes they can be delayed
job_key = self._pending_modal_job_keys.pop()
self._controller.pub( 'modal_message', job_key )
def REPEATINGUIUpdate( self ):
for window in list( self._ui_update_windows ):
if not QP.isValid( window ):
self._ui_update_windows.discard( window )
continue
tlw = window.window()
if not tlw or not QP.isValid( tlw ):
self._ui_update_windows.discard( window )
continue
try:
if HG.profile_mode:
summary = 'Profiling ui update timer: ' + repr( window )
HydrusData.Profile( summary, 'window.TIMERUIUpdate()', globals(), locals(), min_duration_ms = HG.ui_timer_profile_min_job_time_ms )
else:
window.TIMERUIUpdate()
except Exception as e:
self._ui_update_windows.discard( window )
HydrusData.ShowException( e )
if len( self._ui_update_windows ) == 0:
self._ui_update_repeating_job.Cancel()
self._ui_update_repeating_job = None
def ReportFreshSessionLoaded( self, gui_session: ClientGUISession.GUISessionContainer ):
if gui_session.GetName() == CC.LAST_SESSION_SESSION_NAME:
self._controller.ReportLastSessionLoaded( gui_session )
def ReplaceMenu( self, name, menu_or_none, label ):
# this is now way more complicated than I generally need, but I'll hang on to it for the moment
if menu_or_none is not None:
menu_or_none.menuAction().setProperty( 'hydrus_menubar_name', name )
menu_or_none.setTitle( label )
old_menu_index = self._FindMenuBarIndex( name )
if old_menu_index == -1:
if menu_or_none is not None:
menu = menu_or_none
insert_index = 0
# for every menu that may display, if it is displayed now, bump up insertion index up one
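# illustrative example (assuming MENU_ORDER runs file, undo, pages, ...): inserting 'pages' while the
# 'file' and 'undo' menus are currently present would give insert_index == 2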
for possible_name in MENU_ORDER:
if possible_name == name:
break
possible_menu_index = self._FindMenuBarIndex( possible_name )
if possible_menu_index != -1:
insert_index += 1
if len( self._menubar.actions() ) > insert_index:
action_before = self._menubar.actions()[ insert_index ]
else:
action_before = None
menu.setParent( self )
self._menubar.insertMenu( action_before, menu )
else:
old_action = self._menubar.actions()[ old_menu_index ]
old_menu = old_action.menu()
if menu_or_none is not None:
menu = menu_or_none
menu.setParent( self )
self._menubar.insertMenu( old_action, menu )
self._menubar.removeAction( old_action )
else:
self._menubar.removeAction( old_action )
ClientGUIMenus.DestroyMenu( old_menu )
def RestoreOrActivateWindow( self ):
if self.isMinimized():
if self._was_maximised:
self.showMaximized()
else:
self.showNormal()
else:
self.activateWindow()
def SaveAndHide( self ):
if self._done_save_and_close:
return
HG.client_controller.pub( 'pause_all_media' )
try:
if QP.isValid( self._message_manager ):
self._message_manager.CleanBeforeDestroy()
self._message_manager.hide()
#
if self._have_shown_once:
if self._new_options.GetBoolean( 'saving_sash_positions_on_exit' ):
self._SaveSplitterPositions()
ClientGUITopLevelWindows.SaveTLWSizeAndPosition( self, self._frame_key )
for tlw in QW.QApplication.topLevelWidgets():
if not isinstance( tlw, ClientGUISplash.FrameSplash ):
tlw.hide()
if self._have_system_tray_icon:
self._system_tray_icon.hide()
#
only_changed_page_data = True
about_to_save = True
session = self._notebook.GetCurrentGUISession( CC.LAST_SESSION_SESSION_NAME, only_changed_page_data, about_to_save )
session = self._FleshOutSessionWithCleanDataIfNeeded( self._notebook, CC.LAST_SESSION_SESSION_NAME, session )
self._controller.SaveGUISession( session )
session.SetName( CC.EXIT_SESSION_SESSION_NAME )
self._controller.SaveGUISession( session )
#
self._DestroyTimers()
self.DeleteAllClosedPages()
self._notebook.CleanBeforeDestroy()
self._controller.WriteSynchronous( 'save_options', HC.options )
self._controller.WriteSynchronous( 'serialisable', self._new_options )
self._done_save_and_close = True
except Exception as e:
HydrusData.PrintException( e )
def SetMediaFocus( self ):
self._SetMediaFocus()
def SetStatusBarDirty( self ):
self._statusbar_thread_updater.Update()
def ShowPage( self, page_key ):
page = self._notebook.GetPageFromPageKey( page_key )
if page is None:
raise HydrusExceptions.DataMissing( 'Could not find that page!' )
self._notebook.ShowPage( page )
def TryToExit( self, restart = False, force_shutdown_maintenance = False ):
if not self._controller.DoingFastExit():
able_to_close_statement = self._notebook.GetTestAbleToCloseStatement()
if HC.options[ 'confirm_client_exit' ] or able_to_close_statement is not None:
if restart:
text = 'Are you sure you want to restart the client? (Will auto-yes in 15 seconds)'
else:
text = 'Are you sure you want to exit the client? (Will auto-yes in 15 seconds)'
if able_to_close_statement is not None:
text += os.linesep * 2
text += able_to_close_statement
result = ClientGUIDialogsQuick.GetYesNo( self, text, auto_yes_time = 15 )
if result == QW.QDialog.Rejected:
return
if restart:
HG.restart = True
if force_shutdown_maintenance or HG.do_idle_shutdown_work:
HG.do_idle_shutdown_work = True
else:
try:
idle_shutdown_action = self._controller.options[ 'idle_shutdown' ]
last_shutdown_work_time = self._controller.Read( 'last_shutdown_work_time' )
shutdown_work_period = self._controller.new_options.GetInteger( 'shutdown_work_period' )
shutdown_work_due = HydrusData.TimeHasPassed( last_shutdown_work_time + shutdown_work_period )
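# shutdown work is considered due once the configured period has elapsed since the last recorded
# shutdown work time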
if shutdown_work_due:
if idle_shutdown_action == CC.IDLE_ON_SHUTDOWN:
HG.do_idle_shutdown_work = True
elif idle_shutdown_action == CC.IDLE_ON_SHUTDOWN_ASK_FIRST:
idle_shutdown_max_minutes = self._controller.options[ 'idle_shutdown_max_minutes' ]
time_to_stop = HydrusData.GetNow() + ( idle_shutdown_max_minutes * 60 )
work_to_do = self._controller.GetIdleShutdownWorkDue( time_to_stop )
if len( work_to_do ) > 0:
text = 'Is now a good time for the client to do up to ' + HydrusData.ToHumanInt( idle_shutdown_max_minutes ) + ' minutes\' maintenance work? (Will auto-no in 15 seconds)'
text += os.linesep * 2
if HG.client_controller.IsFirstStart():
text += 'Since this is your first session, this maintenance should just be some quick initialisation work. It should only take a few seconds.'
text += os.linesep * 2
text += 'The outstanding jobs appear to be:'
text += os.linesep * 2
text += os.linesep.join( work_to_do )
( result, was_cancelled ) = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Maintenance is due', auto_no_time = 15, check_for_cancelled = True )
if was_cancelled:
return
elif result == QW.QDialog.Accepted:
HG.do_idle_shutdown_work = True
else:
# if they said no, don't keep asking
self._controller.Write( 'register_shutdown_work' )
except Exception as e:
self._controller.SafeShowCriticalMessage( 'shutdown error', 'There was a problem trying to review pending shutdown maintenance work. No shutdown maintenance work will be done, and info has been written to the log. Please let hydev know.' )
HydrusData.PrintException( e )
HG.do_idle_shutdown_work = False
QP.CallAfter( self._controller.Exit )
def TryToOpenManageServicesForAutoAccountCreation( self, service_key: bytes ):
self._ManageServices( auto_account_creation_service_key = service_key )
def UnregisterAnimationUpdateWindow( self, window ):
self._animation_update_windows.discard( window )
def UnregisterUIUpdateWindow( self, window ):
self._ui_update_windows.discard( window )
| 42.443209 | 1,694 | 0.560481 |
794516c5be526ec02c38c2085c25823f9cca38a4 | 11,687 | py | Python | reference_code/GSNet-release/demo/predictor.py | lkeab/gsnet | b150d13543cda61b1b736ac6ae911d2e0ab9663f | [
"MIT"
] | 78 | 2020-08-02T12:21:31.000Z | 2022-03-31T10:43:59.000Z | reference_code/GSNet-release/demo/predictor.py | PionnerLC/gsnet | 69c418fd5c8ec9ee90b4298888f59d9ce5b37749 | [
"MIT"
] | 15 | 2020-09-08T18:47:47.000Z | 2022-03-11T14:37:44.000Z | reference_code/GSNet-release/demo/predictor.py | PionnerLC/gsnet | 69c418fd5c8ec9ee90b4298888f59d9ce5b37749 | [
"MIT"
] | 18 | 2021-02-27T17:55:45.000Z | 2022-03-31T05:16:12.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
import os
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
from PIL import Image
from detectron2.layers.nms import batched_nms
import soft_renderer as sr
import json
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image, save_name):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
mask = predictions['instances'].raw_masks.squeeze(1).data.cpu().numpy() if predictions['instances'].has("raw_masks") else None
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
pred_classes = torch.ones(instances.pred_classes.shape)
                # uncomment to enable NMS across different classes
'''
res = batched_nms(instances.pred_boxes.tensor, instances.scores, pred_classes, 0.5)
print('res:', res)
print('res:', res.size()[0])
#instances.num_instances = res.size()[0]
instances.pred_boxes.tensor = instances.pred_boxes.tensor[res]
instances.pred_classes = instances.pred_classes[res]
instances.scores = instances.scores[res]
instances.pred_keypoints = instances.pred_keypoints[res]
instances.predict_trans = instances.predict_trans[res]
instances.predict_rotation = instances.predict_rotation[res]
instances.predict_vertices = instances.predict_vertices[res]
print('pred trans shape:', instances.predict_trans.shape)
'''
vis_output = visualizer.draw_instance_predictions(predictions=instances)
output_trans_dir = './inference_val_translation/'
output_rotation_dir = './inference_val_rotation/'
output_mesh_dir = './inference_val_mesh/'
output_cls_dir = './inference_val_cls/'
output_score_dir = './inference_val_score/'
save_name = save_name.split('/')[1]
template_path = './merge_mean_car_shape/'
faces = sr.Mesh.from_obj(template_path+'merge_mean_car_model_0.obj').faces
for directory in [output_trans_dir, output_rotation_dir, output_mesh_dir, output_cls_dir, output_score_dir]:
if not os.path.exists(directory):
os.makedirs(directory)
for index in range(instances.predict_trans.shape[0]):
with open(os.path.join(output_trans_dir, save_name +'_' +str(index)+'.json'),'w') as f:
data = {}
data['translation'] = list(instances.predict_trans[index].cpu().detach().numpy().astype(float))
json.dump(data, f)
for index in range(instances.predict_rotation.shape[0]):
with open(os.path.join(output_rotation_dir, save_name +'_' +str(index)+'.json'),'w') as f:
data = {}
data['rotation'] = list(instances.predict_rotation[index].cpu().detach().numpy().astype(float))
json.dump(data, f)
for index in range(instances.pred_classes.shape[0]):
with open(os.path.join(output_cls_dir, save_name +'_' +str(index)+'.json'),'w') as f:
data = {}
data['car_id'] = int(instances.pred_classes[index].cpu().detach().numpy().astype(float))
json.dump(data, f)
for index in range(instances.scores.shape[0]):
with open(os.path.join(output_score_dir, save_name +'_' +str(index)+'.json'),'w') as f:
data = {}
data['score'] = float(instances.scores[index].cpu().detach().numpy().astype(float))
json.dump(data, f)
for index in range(instances.predict_vertices.shape[0]):
vertices = instances.predict_vertices[index].unsqueeze(0)
sr.Mesh(vertices, faces).save_obj(os.path.join(output_mesh_dir, save_name+'_' + str(index) + '.obj'), save_texture=False)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
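            # Keep the asynchronous predictor busy: buffer up to `default_buffer_size`
            # frames before consuming results, so prediction and visualization overlap.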
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
this helps improve throughput when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
| 40.161512 | 141 | 0.584838 |
79451726e6774d2a9dbb666536a6f1385fe5b2fb | 4,331 | py | Python | sidebar/sidebar.py | letyndr/dash | 3ae8cfb9ea67eccc96e0f7a726a87616bb9e19b5 | [
"MIT"
] | null | null | null | sidebar/sidebar.py | letyndr/dash | 3ae8cfb9ea67eccc96e0f7a726a87616bb9e19b5 | [
"MIT"
] | null | null | null | sidebar/sidebar.py | letyndr/dash | 3ae8cfb9ea67eccc96e0f7a726a87616bb9e19b5 | [
"MIT"
] | null | null | null | """
This app creates a responsive sidebar layout with dash-bootstrap-components and
some custom css with media queries.
When the screen is small, the sidebar is moved to the top of the page, and the
links get hidden in a collapse element. We use a callback to toggle the
collapse when on a small screen, and the custom CSS to hide the toggle, and
force the collapse to stay open when the screen is large.
dcc.Location is used to track the current location, and a callback uses the current
location to render the appropriate page content. The active prop of each
NavLink is set automatically according to the current pathname. To use this
feature you must install dash-bootstrap-components >= 0.11.0.
For more details on building multi-page Dash applications, check out the Dash
documentation: https://dash.plot.ly/urls
"""
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP],
# these meta_tags ensure content is scaled correctly on different devices
# see: https://www.w3schools.com/css/css_rwd_viewport.asp for more
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
)
# we use the Row and Col components to construct the sidebar header
# it consists of a title, and a toggle, the latter is hidden on large screens
sidebar_header = dbc.Row(
[
dbc.Col(html.H2("Sidebar", className="display-4")),
dbc.Col(
html.Button(
# use the Bootstrap navbar-toggler classes to style the toggle
html.Span(className="navbar-toggler-icon"),
className="navbar-toggler",
# the navbar-toggler classes don't set color, so we do it here
style={
"color": "rgba(0,0,0,.5)",
"border-color": "rgba(0,0,0,.1)",
},
id="toggle",
),
# the column containing the toggle will be only as wide as the
# toggle, resulting in the toggle being right aligned
width="auto",
# vertically align the toggle in the center
align="center",
),
]
)
sidebar = html.Div(
[
sidebar_header,
# we wrap the horizontal rule and short blurb in a div that can be
# hidden on a small screen
html.Div(
[
html.Hr(),
html.P(
"A responsive sidebar layout with collapsible navigation "
"links.",
className="lead",
),
],
id="blurb",
),
# use the Collapse component to animate hiding / revealing links
dbc.Collapse(
dbc.Nav(
[
dbc.NavLink("Home", href="/", active="exact"),
dbc.NavLink("Page 1", href="/page-1", active="exact"),
dbc.NavLink("Page 2", href="/page-2", active="exact"),
],
vertical=True,
pills=True,
),
id="collapse",
),
],
id="sidebar",
)
content = html.Div(id="page-content")
app.layout = html.Div([dcc.Location(id="url"), sidebar, content])
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def render_page_content(pathname):
if pathname == "/":
return html.P("This is the content of the home page!")
elif pathname == "/page-1":
return html.P("This is the content of page 1. Yay!")
elif pathname == "/page-2":
return html.P("Oh cool, this is page 2!")
# If the user tries to reach a different page, return a 404 message
return dbc.Jumbotron(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognised..."),
]
)
@app.callback(
Output("collapse", "is_open"),
[Input("toggle", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
if __name__ == "__main__":
app.run_server(port=8888, debug=True) | 34.102362 | 79 | 0.596629 |
7945182a18c2a2f0af00967776796529b9cd1955 | 122 | py | Python | src/TB3Util/tb3exporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | src/TB3Util/tb3exporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | src/TB3Util/tb3exporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | class TB3Exporter:
    def __init__(self, export_path):
        self.path = export_path
    def export_bank(self, bank):
        # Placeholder: exporting a bank has not been implemented yet.
        assert(False) | 24.4 | 32 | 0.770492 |
7945183e040b2fd5479453d0c7d9a7393b88eb6a | 748 | py | Python | setup.py | cnicol-gwlogic/pyemu | f1624da68f59f6ae8be54ddba9d39982cc7f3b1c | [
"BSD-3-Clause"
] | 41 | 2020-06-04T12:53:37.000Z | 2022-03-30T17:56:13.000Z | setup.py | cnicol-gwlogic/pyemu | f1624da68f59f6ae8be54ddba9d39982cc7f3b1c | [
"BSD-3-Clause"
] | 184 | 2020-05-29T14:25:23.000Z | 2022-03-29T04:01:42.000Z | setup.py | cnicol-gwlogic/pyemu | f1624da68f59f6ae8be54ddba9d39982cc7f3b1c | [
"BSD-3-Clause"
] | 40 | 2020-05-29T14:31:06.000Z | 2022-03-23T02:41:19.000Z | import sys
#from distutils.core import setup
from setuptools import setup
long_description = \
"""pyemu is a set of python modules for interfacing with PEST and PEST++.
"""
setup(name="pyemu",
description=long_description,
long_description=long_description,
author="Jeremy White, Mike Fienen,Brioch Hemmings",
author_email='[email protected],[email protected],[email protected]',
url='https://github.com/pypest/pyemu',
download_url = 'https://github.com/jtwhite79/pyemu/tarball/1.1.0',
license='New BSD',
platforms='Windows, Mac OS-X, Linux',
packages = ["pyemu","pyemu.pst","pyemu.plot","pyemu.utils","pyemu.mat","pyemu.prototypes"],
version="1.1.0")
| 37.4 | 98 | 0.669786 |
79451a19bfe6f6526876428bdb5cd047da930e37 | 1,008 | py | Python | landlab/framework/decorators.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 257 | 2015-01-13T16:01:21.000Z | 2022-03-29T22:37:43.000Z | landlab/framework/decorators.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 1,222 | 2015-02-05T21:36:53.000Z | 2022-03-31T17:53:49.000Z | landlab/framework/decorators.py | amanaster2/landlab | ea17f8314eb12e3fc76df66c9b6ff32078caa75c | [
"MIT"
] | 274 | 2015-02-11T19:56:08.000Z | 2022-03-28T23:31:07.000Z | #! /usr/bin/env python
"""Decorators for TheLandlab package."""
import re
def camel_case(text, sep=None):
"""Convert to camel case.
Convert *text* to camel case. Use the *sep* keyword to specify the word
separator. The default is to split on whitespace.
>>> from landlab.framework.decorators import camel_case
>>> camel_case("eric idle")
'EricIdle'
>>> camel_case("terry_gilliam", sep="_")
'TerryGilliam'
>>> camel_case("MONTY Python")
'MONTYPython'
>>> camel_case("GrahamChapman")
'GrahamChapman'
"""
return "".join([word[0].upper() + word[1:] for word in text.split(sep)])
def snake_case(text):
"""Convert camel case to snake case.
Examples
--------
>>> from landlab.framework.decorators import snake_case
>>> snake_case("EricIdle")
'eric_idle'
>>> snake_case("MONTYPython")
'monty_python'
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", text)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
| 26.526316 | 76 | 0.614087 |
79451a9103f175f4eec6abdf1f1f27c28ec34494 | 1,119 | py | Python | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | BinaryTreeNode/KDS | 6220475814b42733c86ac0005e8548bb9a481c75 | [
"Apache-2.0"
] | 1 | 2020-07-02T14:53:18.000Z | 2020-07-02T14:53:18.000Z | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | BinaryTreeNode/KDS | 6220475814b42733c86ac0005e8548bb9a481c75 | [
"Apache-2.0"
] | null | null | null | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | BinaryTreeNode/KDS | 6220475814b42733c86ac0005e8548bb9a481c75 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def get_version(bin_name):
"""Get the version of an installed Kubernetes binary.
:param str bin_name: Name of binary
:return: 3-tuple version (maj, min, patch)
Example::
    >>> get_version('kubelet')
(1, 6, 0)
"""
cmd = '{} --version'.format(bin_name).split()
version_string = subprocess.check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
| 31.083333 | 75 | 0.681859 |
79451afb03cd1792d1aa3f6b1d7531a81acd9ec6 | 10,880 | py | Python | meta_learning_without_memorization/pose_code/maml_bbb_2.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 6 | 2019-12-16T04:23:57.000Z | 2021-12-09T14:17:14.000Z | meta_learning_without_memorization/pose_code/maml_bbb_2.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | meta_learning_without_memorization/pose_code/maml_bbb_2.py | ZachT1711/google-research | 662e6837a3efa0c40b11cb4122447c4b028d2115 | [
"Apache-2.0"
] | 1 | 2019-12-19T09:44:55.000Z | 2019-12-19T09:44:55.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for maml_bbb.py."""
from __future__ import print_function
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.layers.python import layers as tf_layers
FLAGS = flags.FLAGS
## Network helpers
def conv_block(x, weight, bias, reuse, scope):
# conv
x = tf.nn.conv2d(x, weight, [1, 1, 1, 1], 'SAME') + bias
# batch norm
x = tf_layers.batch_norm(
x, activation_fn=tf.nn.relu, reuse=reuse, scope=scope)
# # pooling
# x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
return x
## Loss functions
def mse(pred, label):
pred = tf.reshape(pred, [-1])
label = tf.reshape(label, [-1])
return tf.reduce_mean(tf.square(pred - label))
class MAML(object):
"""MAML algo object."""
def __init__(self, encoder_w, dim_input=1, dim_output=1):
"""Must call construct_model() after initializing MAML."""
self.beta = tf.placeholder_with_default(FLAGS.beta, ())
self.dim_input = dim_input
self.dim_output = dim_output
self.update_lr = FLAGS.update_lr
self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
self.loss_func = mse
self.encoder_w = encoder_w
self.dim_hidden = FLAGS.num_filters
self.forward = self.forward_conv
self.construct_weights = self.construct_conv_weights
self.channels = 1
self.img_size = int(np.sqrt(self.dim_input / self.channels))
def construct_model(self,
input_tensors=None,
prefix='metatrain_',
test_num_updates=0):
"""a: training data for inner gradient, b: test data for meta gradient."""
self.inputa = input_tensors['inputa']
self.inputb = input_tensors['inputb']
self.labela = input_tensors['labela']
self.labelb = input_tensors['labelb']
with tf.variable_scope('model', reuse=None) as training_scope:
if 'weights' in dir(self):
training_scope.reuse_variables()
weights = self.weights
else:
# Define the weights
self.weights = weights = self.construct_weights()
# outputbs[i] and lossesb[i] is the output and loss after i+1 gradient
# updates
num_updates = max(test_num_updates, FLAGS.num_updates)
def task_metalearn(inp, reuse=True):
"""Run meta learning."""
TRAIN = 'train' in prefix # pylint: disable=invalid-name
# Perform gradient descent for one task in the meta-batch.
inputa, inputb, labela, labelb = inp
task_outputbs, task_lossesb = [], []
task_msesb = []
# support_pred and loss, (n_data_per_task, out_dim)
task_outputa = self.forward(
inputa, weights, reuse=reuse) # only not reuse on the first iter
# labela is (n_data_per_task, out_dim)
task_lossa = self.loss_func(task_outputa, labela)
# INNER LOOP (no change with ib)
grads = tf.gradients(task_lossa, list(weights.values()))
if FLAGS.stop_grad:
grads = [tf.stop_gradient(grad) for grad in grads]
gradients = dict(zip(weights.keys(), grads))
# theta_pi = theta - alpha * grads
fast_weights = dict(
zip(weights.keys(), [
weights[key] - self.update_lr * gradients[key]
for key in weights.keys()
]))
        # forward meta-test data with the pre-update weights (these feed the Pre-mse summary)
output = self.forward(inputb, weights, reuse=True)
task_outputbs.append(output)
# meta-test loss
task_kl_loss = sum(self.encoder_w.losses)
task_msesb.append(self.loss_func(output, labelb))
task_lossesb.append(
self.loss_func(output, labelb) + self.beta * task_kl_loss)
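        # The remaining (num_updates - 1) inner-loop adaptation steps run inside the
        # tf.while_loop below; gradients flow through them only during meta-training
        # (back_prop=TRAIN).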
def while_body(fast_weights_values):
"""Update params."""
loss = self.loss_func(
self.forward(
inputa,
dict(zip(fast_weights.keys(), fast_weights_values)),
reuse=True), labela)
grads = tf.gradients(loss, fast_weights_values)
fast_weights_values = [
v - self.update_lr * g for v, g in zip(fast_weights_values, grads)
]
return fast_weights_values
fast_weights_values = tf.while_loop(
lambda _: True,
while_body,
loop_vars=[fast_weights.values()],
maximum_iterations=num_updates - 1,
back_prop=TRAIN)
fast_weights = dict(zip(fast_weights.keys(), fast_weights_values))
output = self.forward(inputb, fast_weights, reuse=True)
task_outputbs.append(output)
task_msesb.append(self.loss_func(output, labelb))
task_lossesb.append(
self.loss_func(output, labelb) + self.beta * task_kl_loss)
task_output = [
task_outputa, task_outputbs, task_lossa, task_lossesb, task_msesb
]
return task_output
if FLAGS.norm is not None:
# to initialize the batch norm vars, might want to combine this, and
# not run idx 0 twice.
_ = task_metalearn(
(self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]),
False)
out_dtype = [
tf.float32, [tf.float32] * 2, tf.float32, [tf.float32] * 2,
[tf.float32] * 2
]
result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, \
self.labela, self.labelb), dtype=out_dtype, \
parallel_iterations=FLAGS.meta_batch_size)
outputas, outputbs, lossesa, lossesb, msesb = result
## Performance & Optimization
if 'train' in prefix:
# lossesa is length(meta_batch_size)
self.total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(
FLAGS.meta_batch_size)
self.total_losses2 = total_losses2 = [
tf.reduce_sum(msesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(msesb))
]
self.total_losses3 = total_losses3 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(lossesb))
]
# after the map_fn
self.outputas, self.outputbs = outputas, outputbs
# OUTER LOOP
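      # Meta-update: the model weights (THETA) are trained on the plain meta-test MSE
      # (total_losses2), while the encoder weights (PHI) are trained on the
      # KL-regularized objective (total_losses3).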
if FLAGS.metatrain_iterations > 0:
optimizer = tf.train.AdamOptimizer(self.meta_lr)
THETA = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model') # pylint: disable=invalid-name
PHI = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder') # pylint: disable=invalid-name
self.gvs_theta = gvs_theta = optimizer.compute_gradients(
self.total_losses2[-1], THETA)
metatrain_theta_op = optimizer.apply_gradients(gvs_theta)
self.gvs_phi = gvs_phi = optimizer.compute_gradients(
self.total_losses3[-1], PHI)
metatrain_phi_op = optimizer.apply_gradients(gvs_phi)
with tf.control_dependencies([metatrain_theta_op, metatrain_phi_op]):
self.metatrain_op = tf.no_op()
scale_v = [
v for v in self.encoder_w.trainable_variables if 'scale' in v.name
]
scale_norm = [tf.reduce_mean(v) for v in scale_v]
scale_norm = tf.reduce_mean(scale_norm)
tf.summary.scalar(prefix + 'full_loss', total_losses3[-1])
tf.summary.scalar(prefix + 'regularizer',
total_losses3[-1] - total_losses2[-1])
tf.summary.scalar(prefix + 'untransformed_scale', scale_norm)
else:
self.metaval_total_loss1 = tf.reduce_sum(
lossesa) / tf.to_float(FLAGS.meta_batch_size)
self.metaval_total_losses2 = total_losses2 = [
tf.reduce_sum(msesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(msesb))
]
self.metaval_total_losses3 = total_losses3 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(len(lossesb))
]
tf.summary.scalar(prefix + 'Pre-mse', total_losses2[0])
tf.summary.scalar(prefix + 'Post-mse_' + str(num_updates),
total_losses2[-1])
def construct_conv_weights(self):
"""Construct conv weights."""
weights = {}
dtype = tf.float32
conv_initializer = contrib_layers.xavier_initializer_conv2d(dtype=dtype)
k = 3
weights['conv1'] = tf.get_variable(
'conv1', [k, k, self.channels, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv2'] = tf.get_variable(
'conv2', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv3'] = tf.get_variable(
'conv3', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv4'] = tf.get_variable(
'conv4', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['w5'] = tf.Variable(
tf.random_normal([self.dim_hidden, self.dim_output]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
return weights
def forward_conv(self, inp, weights, reuse=False, scope=''):
"""Forward conv."""
# reuse is for the normalization parameters.
channels = self.channels
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse,
scope + '0')
hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse,
scope + '1')
hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse,
scope + '2')
hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse,
scope + '3')
# last hidden layer is 6x6x64-ish, reshape to a vector
hidden4 = tf.reduce_mean(hidden4, [1, 2])
return tf.matmul(hidden4, weights['w5']) + weights['b5']
| 37.777778 | 111 | 0.631893 |
79451c218aa2954fde5b781dbf86872556f588e0 | 41,046 | py | Python | custodian/qchem/tests/test_jobs.py | utf/custodian | 148d21e2bbcc0222122a287582d27021f20d9452 | [
"MIT"
] | 80 | 2015-04-04T18:53:40.000Z | 2022-03-16T05:58:31.000Z | custodian/qchem/tests/test_jobs.py | utf/custodian | 148d21e2bbcc0222122a287582d27021f20d9452 | [
"MIT"
] | 111 | 2015-09-08T20:49:36.000Z | 2022-03-25T05:56:01.000Z | custodian/qchem/tests/test_jobs.py | utf/custodian | 148d21e2bbcc0222122a287582d27021f20d9452 | [
"MIT"
] | 102 | 2015-01-07T10:25:46.000Z | 2022-02-08T22:52:16.000Z | # coding: utf-8
from __future__ import unicode_literals, division
import os
import shutil
from unittest import TestCase
try:
from unittest.mock import patch # type: ignore
except ImportError:
from mock import patch # type: ignore
import unittest
from custodian.qchem.jobs import QCJob
from pymatgen.io.qchem.inputs import QCInput
__author__ = "Samuel Blau"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "[email protected]"
__status__ = "Alpha"
__date__ = "6/6/18"
__credits__ = "Shyam Dwaraknath"
test_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "test_files", "qchem", "new_test_files"
)
scr_dir = os.path.join(test_dir, "scr")
cwd = os.getcwd()
class QCJobTest(TestCase):
def test_defaults(self):
with patch("custodian.qchem.jobs.shutil.copy") as copy_patch:
myjob = QCJob(qchem_command="qchem", max_cores=32)
self.assertEqual(myjob.current_command, "qchem -nt 32 mol.qin mol.qout scratch")
myjob.setup()
self.assertEqual(copy_patch.call_args_list[0][0][0], "mol.qin")
self.assertEqual(copy_patch.call_args_list[0][0][1], "mol.qin.orig")
self.assertEqual(os.environ["QCSCRATCH"], os.getcwd())
self.assertEqual(os.environ["QCTHREADS"], "32")
self.assertEqual(os.environ["OMP_NUM_THREADS"], "32")
def test_not_defaults(self):
myjob = QCJob(
qchem_command="qchem -slurm",
multimode="mpi",
input_file="different.qin",
output_file="not_default.qout",
max_cores=12,
calc_loc="/not/default/",
backup=False,
)
self.assertEqual(myjob.current_command, "qchem -slurm -np 12 different.qin not_default.qout scratch")
myjob.setup()
self.assertEqual(os.environ["QCSCRATCH"], os.getcwd())
self.assertEqual(os.environ["QCLOCALSCR"], "/not/default/")
def test_save_scratch(self):
with patch("custodian.qchem.jobs.shutil.copy") as copy_patch:
myjob = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
calc_loc="/tmp/scratch",
save_scratch=True,
)
self.assertEqual(
myjob.current_command,
"qchem -slurm -nt 32 mol.qin mol.qout scratch",
)
myjob.setup()
self.assertEqual(copy_patch.call_args_list[0][0][0], "mol.qin")
self.assertEqual(copy_patch.call_args_list[0][0][1], "mol.qin.orig")
self.assertEqual(os.environ["QCSCRATCH"], os.getcwd())
self.assertEqual(os.environ["QCTHREADS"], "32")
self.assertEqual(os.environ["OMP_NUM_THREADS"], "32")
self.assertEqual(os.environ["QCLOCALSCR"], "/tmp/scratch")
class OptFFTest(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "FF_working/test.qin"),
os.path.join(scr_dir, "test.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_working/test.qout.opt_0"),
os.path.join(scr_dir, "test.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_working/test.qout.freq_0"),
os.path.join(scr_dir, "test.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_working/test.qout.opt_1"),
os.path.join(scr_dir, "test.qout.opt_1"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_working/test.qout.freq_1"),
os.path.join(scr_dir, "test.qout.freq_1"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem",
max_cores=32,
input_file="test.qin",
output_file="test.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_working/test.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "test.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".opt_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_working/test.qin.opt_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "test.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".freq_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_working/test.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "test.qin")).as_dict(),
)
self.assertRaises(StopIteration, myjob.__next__)
class OptFFTest1(TestCase):
def setUp(self):
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "2620_complete/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "2620_complete/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertRaises(StopIteration, myjob.__next__)
class OptFFTest2(TestCase):
def setUp(self):
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "disconnected_but_converged/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "disconnected_but_converged/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "disconnected_but_converged/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "disconnected_but_converged/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
self.assertRaises(StopIteration, myjob.__next__)
class OptFFTestSwitching(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.opt_1"),
os.path.join(scr_dir, "mol.qout.opt_1"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.freq_1"),
os.path.join(scr_dir, "mol.qout.freq_1"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.opt_2"),
os.path.join(scr_dir, "mol.qout.opt_2"),
)
shutil.copyfile(
os.path.join(test_dir, "FF_switching/mol.qout.freq_2"),
os.path.join(scr_dir, "mol.qout.freq_2"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_switching/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_switching/mol.qin.opt_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_switching/mol.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_switching/mol.qin.opt_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "FF_switching/mol.qin.freq_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
self.assertRaises(StopIteration, myjob.__next__)
class OptFFTest6004(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.opt_1"),
os.path.join(scr_dir, "mol.qout.opt_1"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.freq_1"),
os.path.join(scr_dir, "mol.qout.freq_1"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.opt_2"),
os.path.join(scr_dir, "mol.qout.opt_2"),
)
shutil.copyfile(
os.path.join(test_dir, "6004_frag12/mol.qout.freq_2"),
os.path.join(scr_dir, "mol.qout.freq_2"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "6004_frag12/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "6004_frag12/mol.qin.opt_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "6004_frag12/mol.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "6004_frag12/mol.qin.opt_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "6004_frag12/mol.qin.freq_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
class OptFFTest5952(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "5952_frag16/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "5952_frag16/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "5952_frag16/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5952_frag16/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
self.assertRaises(Exception, myjob.__next__)
class OptFFTest5690(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.opt_1"),
os.path.join(scr_dir, "mol.qout.opt_1"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.freq_1"),
os.path.join(scr_dir, "mol.qout.freq_1"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.opt_2"),
os.path.join(scr_dir, "mol.qout.opt_2"),
)
shutil.copyfile(
os.path.join(test_dir, "5690_frag18/mol.qout.freq_2"),
os.path.join(scr_dir, "mol.qout.freq_2"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=False,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5690_frag18/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5690_frag18/mol.qin.opt_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_1",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5690_frag18/mol.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5690_frag18/mol.qin.opt_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_2",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "5690_frag18/mol.qin.freq_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
self.assertRaises(Exception, myjob.__next__)
class OptFF_small_neg_freq(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
os.makedirs(os.path.join(scr_dir, "scratch"))
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qin.opt_0"),
os.path.join(scr_dir, "mol.qin.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.opt_0"),
os.path.join(scr_dir, "mol.qout.opt_0"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.opt_1"),
os.path.join(scr_dir, "mol.qout.opt_1"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.freq_1"),
os.path.join(scr_dir, "mol.qout.freq_1"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.opt_2"),
os.path.join(scr_dir, "mol.qout.opt_2"),
)
shutil.copyfile(
os.path.join(test_dir, "small_neg_freq/mol.qout.freq_2"),
os.path.join(scr_dir, "mol.qout.freq_2"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=True,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_0",
save_scratch=True,
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "small_neg_freq/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_0"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_1",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "small_neg_freq/mol.qin.opt_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.opt_1"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_1",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "small_neg_freq/mol.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_1"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".opt_2",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "small_neg_freq/mol.qin.opt_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.opt_2"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_2",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "small_neg_freq/mol.qin.freq_2")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_2"),
)
self.assertRaises(StopIteration, myjob.__next__)
class TSFFTest(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_no_freqfirst/mol.qin.freq_0"),
os.path.join(scr_dir, "test.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_no_freqfirst/mol.qout.ts_0"),
os.path.join(scr_dir, "test.qout.ts_0"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_no_freqfirst/mol.qout.freq_0"),
os.path.join(scr_dir, "test.qout.freq_0"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem",
max_cores=32,
input_file="test.qin",
output_file="test.qout",
linked=False,
transition_state=True,
)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".ts_0",
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem",
max_cores=32,
multimode="openmp",
input_file="test.qin",
output_file="test.qout",
suffix=".freq_0",
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_no_freqfirst/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "test.qin")).as_dict(),
)
self.assertRaises(StopIteration, myjob.__next__)
class TSFF_freqfirst(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
os.makedirs(os.path.join(scr_dir, "scratch"))
shutil.copyfile(
os.path.join(test_dir, "fftsopt_freqfirst/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_freqfirst/mol.qin.freq_pre"),
os.path.join(scr_dir, "mol.qin.freq_pre"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_freqfirst/mol.qout.freq_pre"),
os.path.join(scr_dir, "mol.qout.freq_pre"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_freqfirst/mol.qout.ts_0"),
os.path.join(scr_dir, "mol.qout.ts_0"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_freqfirst/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=True,
transition_state=True,
freq_before_opt=True,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_pre",
save_scratch=True,
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".ts_0",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_freqfirst/mol.qin.ts_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.ts_0"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_freqfirst/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_0"),
)
self.assertRaises(StopIteration, myjob.__next__)
class TSFF_freqfirst_multiple_cycles(TestCase):
def setUp(self):
self.maxDiff = None
os.makedirs(scr_dir)
os.makedirs(os.path.join(scr_dir, "scratch"))
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.orig"),
os.path.join(scr_dir, "mol.qin"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.freq_pre"),
os.path.join(scr_dir, "mol.qin.freq_pre"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qout.freq_pre"),
os.path.join(scr_dir, "mol.qout.freq_pre"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qout.ts_0"),
os.path.join(scr_dir, "mol.qout.ts_0"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qout.freq_0"),
os.path.join(scr_dir, "mol.qout.freq_0"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qout.ts_1"),
os.path.join(scr_dir, "mol.qout.ts_1"),
)
shutil.copyfile(
os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qout.freq_1"),
os.path.join(scr_dir, "mol.qout.freq_1"),
)
os.chdir(scr_dir)
def tearDown(self):
os.chdir(cwd)
shutil.rmtree(scr_dir)
def test_OptFF(self):
myjob = QCJob.opt_with_frequency_flattener(
qchem_command="qchem -slurm",
max_cores=32,
input_file="mol.qin",
output_file="mol.qout",
linked=True,
transition_state=True,
freq_before_opt=True,
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_pre",
save_scratch=True,
backup=True,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".ts_0",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.ts_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.ts_0"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_0",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.freq_0")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_0"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".ts_1",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.ts_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.ts_1"),
)
expected_next = QCJob(
qchem_command="qchem -slurm",
max_cores=32,
multimode="openmp",
input_file="mol.qin",
output_file="mol.qout",
suffix=".freq_1",
save_scratch=True,
backup=False,
).as_dict()
self.assertEqual(next(myjob).as_dict(), expected_next)
self.assertEqual(
QCInput.from_file(os.path.join(test_dir, "fftsopt_multiple_cycles/mol.qin.freq_1")).as_dict(),
QCInput.from_file(os.path.join(scr_dir, "mol.qin")).as_dict(),
)
shutil.copyfile(
os.path.join(scr_dir, "mol.qin"),
os.path.join(scr_dir, "mol.qin.freq_1"),
)
self.assertRaises(StopIteration, myjob.__next__)
if __name__ == "__main__":
unittest.main()
| 35.142123 | 109 | 0.555596 |
79451c4280356e64ed97bb3088c81d212d7e01d5 | 46,648 | py | Python | tensorflow/contrib/learn/python/learn/estimators/estimator.py | PedroLelis/tensorflow | 8852b0032ad49acbc59009776665c60f86c06f91 | [
"Apache-2.0"
] | 1 | 2019-02-16T10:41:53.000Z | 2019-02-16T10:41:53.000Z | tensorflow/contrib/learn/python/learn/estimators/estimator.py | PedroLelis/tensorflow | 8852b0032ad49acbc59009776665c60f86c06f91 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/learn/python/learn/estimators/estimator.py | PedroLelis/tensorflow | 8852b0032ad49acbc59009776665c60f86c06f91 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import itertools
import os
import tempfile
import time
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
# TODO(roumposg): Migrate external users to tf.learn.contrib.ModeKeys and delete
# this.
ModeKeys = model_fn_lib.ModeKeys # pylint: disable=invalid-name
# TODO(roumposg): Migrate external users to model.ModelFnOps and delete this.
ModelFnOps = model_fn_lib.ModelFnOps # pylint: disable=invalid-name
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
df = data_feeder.setup_train_data_feeder(x, y, n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
return input_fn, feed_fn
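# Illustrative sketch (not part of the original module): how `_get_input_fn`
# is typically fed with in-memory arrays. The shapes and batch size below are
# arbitrary placeholders, assuming the numpy path rather than a pre-built
# input_fn.
def _example_get_input_fn_usage():
  """Builds an input_fn/feed_fn pair from toy numpy arrays (illustration)."""
  x = np.random.rand(100, 3).astype(np.float32)  # 100 samples, 3 features
  y = np.random.rand(100).astype(np.float32)     # 100 scalar labels
  input_fn, feed_fn = _get_input_fn(
      x, y, input_fn=None, feed_fn=None, batch_size=32, shuffle=True,
      epochs=1)
  return input_fn, feed_fn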
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
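# Illustrative sketch (not part of the original module): a minimal input_fn
# matching the (features, labels) contract described above. The feature name
# 'age' and the values are placeholders.
def _example_infer_columns_usage():
  """Infers feature columns from a toy input_fn (illustration only)."""
  def input_fn():
    features = {'age': ops.convert_to_tensor([[25.0], [32.0]])}
    labels = ops.convert_to_tensor([[0.0], [1.0]])
    return features, labels
  return infer_real_valued_columns_from_input_fn(input_fn)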
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.job_name:
worker_device = '/job:%s/task:%d' % (config.job_name, config.task)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=False, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly name given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
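# Illustrative sketch (not part of the original module): the recommended
# MetricSpec form of the `metrics` dict consumed by `_make_metrics_ops`. The
# prediction key 'classes' is a placeholder for whatever the model_fn emits.
def _example_metrics_spec():
  """Returns a metrics dict in MetricSpec form (illustration only)."""
  return {
      'accuracy': metric_spec.MetricSpec(
          metric_fn=metrics_lib.streaming_accuracy,
          prediction_key='classes'),
  }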
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementation of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
`Estimator` implemented below is a good example of how to use this class.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def evaluate(
self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
steps=None, metrics=None, name=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
input_fn, feed_fn = _get_input_fn(x, y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'batch_size',
'as_iterable'
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
return self._infer_model(
input_fn=input_fn, feed_fn=feed_fn, outputs=outputs,
as_iterable=as_iterable)
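  # Illustrative usage sketch, kept as a comment so the class body is
  # unchanged: consuming the iterable form of predict(). The estimator and
  # input_fn names are placeholders.
  #
  #   for prediction in estimator.predict(input_fn=my_input_fn,
  #                                       as_iterable=True):
  #     handle(prediction)  # one prediction per input example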
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated_arg_values(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
'input_fn (and in most cases, input_feature_key) will become required '
'args, and use_deprecated_input_fn will default to False and be removed '
'altogether.',
use_deprecated_input_fn=True,
input_fn=None)
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _train_model(self,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
# TODO(wicke): Remove this once Model and associated code are gone.
if hasattr(self._config, 'execution_mode'):
if self._config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
self._config.training_worker_max_startup_secs,
self._config.task *
self._config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
self._config.task)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_train_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_train_ops returns a
# (train_op, loss) tuple. The following else-statement code covers these
# cases, but will soon be deleted after the subclasses are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
train_ops = self._get_train_ops(features, labels)
if isinstance(train_ops, ModelFnOps): # Default signature
train_op = train_ops.train_op
loss_op = train_ops.loss
else: # Legacy signature
if len(train_ops) != 2:
raise ValueError('Expected a tuple of train_op and loss, got {}'.
format(train_ops))
train_op = train_ops[0]
loss_op = train_ops[1]
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
return graph_actions._monitored_train( # pylint: disable=protected-access
graph=g,
output_dir=self._model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=self.config.is_chief,
supervisor_master=self._config.master,
supervisor_save_model_secs=self._config.save_checkpoints_secs,
supervisor_save_model_steps=self._config.save_checkpoints_steps,
supervisor_save_summaries_steps=self._config.save_summary_steps,
keep_checkpoint_max=self._config.keep_checkpoint_max,
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
hooks=hooks,
max_steps=max_steps)
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name=''):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained.
checkpoint_path = self._model_dir
latest_path = saver.latest_checkpoint(checkpoint_path)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% checkpoint_path)
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_eval_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_eval_ops returns an
# `eval_dict` dictionary of Tensors. The following else-statement code
# covers these cases, but will soon be deleted after the subclasses are
# updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
eval_ops = self._get_eval_ops(features, labels, metrics)
if isinstance(eval_ops, ModelFnOps): # Default signature
eval_dict = eval_ops.eval_metric_ops
else: # Legacy signature
eval_dict = eval_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
eval_results, current_global_step = graph_actions.evaluate(
graph=g,
output_dir=eval_dir,
checkpoint_path=checkpoint_path,
eval_dict=eval_dict,
update_op=update_op,
global_step_tensor=global_step,
supervisor_master=self._config.evaluation_master,
feed_fn=feed_fn,
max_steps=steps)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(
self, input_fn, feed_fn=None, outputs=None, as_iterable=True):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
# The default return type of _get_predict_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_predict_ops returns a
      # `predictions` Tensor or dict of Tensors. The following else-statement
# code covers these cases, but will soon be deleted after the subclasses
# are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
infer_ops = self._get_predict_ops(features)
if isinstance(infer_ops, ModelFnOps): # Default signature
predictions = infer_ops.predictions
else: # Legacy signature
predictions = infer_ops
      # If predictions is a single output, wrap it into a dict, and remember
      # to unwrap it again before returning.
return_dict = isinstance(predictions, dict)
if not return_dict:
predictions = {'predictions': predictions}
# Filter what to run predictions on, if outputs provided.
if outputs:
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
if as_iterable:
return self._infer_model_as_iterable(
checkpoint_path, predictions, feed_fn, return_dict)
else:
return self._infer_model_single(
checkpoint_path, predictions, feed_fn, return_dict)
def _infer_model_single(
self, checkpoint_path, predictions, feed_fn, return_dict):
if feed_fn is None:
preds = graph_actions.infer(checkpoint_path, predictions)
else:
def _feed_fn():
while True:
yield feed_fn()
outputs = graph_actions.run_feeds(
output_dict=predictions,
feed_dicts=_feed_fn(),
restore_checkpoint_path=checkpoint_path)
preds = {
key: np.concatenate([output[key] for output in outputs], axis=0)
for key in predictions}
return preds if return_dict else preds['predictions']
def _infer_model_as_iterable(
self, checkpoint_path, predictions, feed_fn, return_dict):
if feed_fn is None:
feed_dicts = itertools.repeat(None)
else:
def _feed_fn():
while True:
yield feed_fn()
feed_dicts = _feed_fn()
try:
for output_batch in graph_actions.run_feeds_iter(
output_dict=predictions,
feed_dicts=feed_dicts,
restore_checkpoint_path=checkpoint_path):
# Unpack batches into individual predictions
if return_dict:
batch_length = list(output_batch.values())[0].shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(output_batch)}
else:
for pred in output_batch['predictions']:
yield pred
except errors.OutOfRangeError:
# We fall out of the above loop naturally if feed_fn raises StopIteration,
# or we catch an OutOfRangeError if we've reached the end of inputs.
logging.info('Reached end of inputs for predict_iter.')
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features` are single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode` specifies if this is training, evaluation or
            prediction. See `ModeKeys`.
          * `params` is a `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            configuring Estimators from hyper parameter tuning.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
if 'mode' in model_fn_args:
if 'params' in model_fn_args:
model_fn_results = self._model_fn(features, labels, mode=mode,
params=self.params)
else:
model_fn_results = self._model_fn(features, labels, mode=mode)
else:
model_fn_results = self._model_fn(features, labels)
if isinstance(model_fn_results, ModelFnOps):
return model_fn_results
    # Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels, ModeKeys.EVAL)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, ModeKeys.INFER)
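# Illustrative sketch (not part of the original module): a minimal model_fn of
# the shape described in the Estimator docstring, returning ModelFnOps. The
# feature column 'x' (shape [batch, 1]) and the learning-rate default are
# placeholders.
def _example_model_fn(features, labels, mode, params):
  """Toy linear model illustrating the model_fn contract (never called)."""
  from tensorflow.python.ops import math_ops, variables
  from tensorflow.python.training import gradient_descent
  w = variables.Variable([[1.0]], name='example_w')
  predictions = math_ops.matmul(features['x'], w)
  loss = math_ops.reduce_mean(math_ops.square(predictions - labels))
  train_op = gradient_descent.GradientDescentOptimizer(
      params.get('learning_rate', 0.1)).minimize(loss)
  return ModelFnOps(
      mode=mode, predictions=predictions, loss=loss, train_op=train_op)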
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._estimator._train_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
return self._estimator._infer_model(
input_fn=input_fn, feed_fn=feed_fn, outputs=outputs,
as_iterable=False)
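# Illustrative usage sketch (not part of the original module): driving an
# estimator through the SKCompat wrapper with the scikit-learn style
# x/y/batch_size interface. The arguments are placeholders.
def _example_skcompat_usage(estimator, x_train, y_train, x_test, y_test):
  """Fit, score and predict through SKCompat (illustration only)."""
  sk_est = SKCompat(estimator)
  sk_est.fit(x_train, y_train, batch_size=64, steps=100)
  eval_results = sk_est.score(x_test, y_test, batch_size=64, steps=1)
  predictions = sk_est.predict(x_test, batch_size=64)
  return eval_results, predictions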
| 39.972579 | 85 | 0.669332 |
79451dcde0249e930f89c984e48640fd17eba3af | 1,312 | py | Python | Accelerometer_and_circular_store/seri.py | PremSuresh/Udaya-bon | 27298512e33815a08807896e8743b08ad4e09355 | [
"MIT"
] | 4 | 2022-02-27T18:45:52.000Z | 2022-03-01T05:24:56.000Z | Accelerometer_and_circular_store/seri.py | PremSuresh/Udaya-bon | 27298512e33815a08807896e8743b08ad4e09355 | [
"MIT"
] | null | null | null | Accelerometer_and_circular_store/seri.py | PremSuresh/Udaya-bon | 27298512e33815a08807896e8743b08ad4e09355 | [
"MIT"
] | null | null | null | import csv
import serial
import time
import numpy

# Serial link settings for the accelerometer board.
z1baudrate = 115200
z1port = 'COM3'  # set the correct port before running
b = 0.00
z1serial = serial.Serial(port=z1port, baudrate=z1baudrate)
z1serial.timeout = 2  # set read timeout
# print(z1serial)  # debug serial.
print(z1serial.is_open)  # True when the port opened successfully
if z1serial.is_open:
    while True:
        size = z1serial.inWaiting()
        if size:
            data = z1serial.read(size)
            a = data.decode()
            # The reading is parsed by fixed character positions: a '-' at
            # index 7 means the first value is negative and one character
            # wider than the positive case.
            if a[7] == '-':
                s = a[7] + a[8] + a[9] + a[10] + a[11]
                b = float(s)
                print(b)
                c = a[20] + a[21] + a[22] + a[23] + a[24]
                d = float(c)
                g = 29.0
                if b / g <= -1:
                    print("PROBABLE ACCIDENT")
                    exit(0)  # stop the script once an accident is flagged
            else:
                s = a[7] + a[8] + a[9] + a[10]
                b = float(s)
                print(b)
                c = a[20] + a[21] + a[22] + a[23]
                d = float(c)
                h = 10
                if d / h >= 1:
                    print("PROBABLE ACCIDENT")
            print(data)
        else:
            print('no data')
        time.sleep(1)
else:
    print('z1serial not open')
# z1serial.close()  # close z1serial if z1serial is open.
| 26.77551 | 58 | 0.448933 |
79451e94bdf75ce76de144b3df62cca58d4c9e56 | 9,050 | py | Python | models/loop_qcd_qed_sm/object_library.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 5 | 2018-10-23T14:37:18.000Z | 2021-11-22T20:59:02.000Z | models/loop_qcd_qed_sm/object_library.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 26 | 2018-10-08T15:49:32.000Z | 2020-05-15T13:33:36.000Z | models/loop_qcd_qed_sm/object_library.py | khurtado/MG5_aMC | 9cde676b0a1097058c416983017af257385fa375 | [
"NCSA"
] | 4 | 2019-02-18T11:42:18.000Z | 2021-11-11T20:46:08.000Z | ##
##
## Feynrules Header
##
##
##
##
##
import cmath
import re
class UFOError(Exception):
"""Exception raised if when inconsistencies are detected in the UFO model."""
pass
class UFOBaseClass(object):
"""The class from which all FeynRules classes are derived."""
require_args = []
def __init__(self, *args, **options):
assert(len(self.require_args) == len (args))
for i, name in enumerate(self.require_args):
setattr(self, name, args[i])
for (option, value) in options.items():
setattr(self, option, value)
def get(self, name):
return getattr(self, name)
def set(self, name, value):
setattr(self, name, value)
def get_all(self):
"""Return a dictionary containing all the information of the object"""
return self.__dict__
def __str__(self):
return self.name
def nice_string(self):
""" return string with the full information """
return '\n'.join(['%s \t: %s' %(name, value) for name, value in self.__dict__.items()])
def __repr__(self):
replacements = [
('+','__plus__'),
('-','__minus__'),
('@','__at__'),
('!','__exclam__'),
('?','__quest__'),
('*','__star__'),
('~','__tilde__')
]
text = self.name
for orig,sub in replacements:
text = text.replace(orig,sub)
return text
all_particles = []
class Particle(UFOBaseClass):
"""A standard Particle"""
require_args=['pdg_code', 'name', 'antiname', 'spin', 'color', 'mass', 'width', 'texname', 'antitexname', 'charge']
require_args_all = ['pdg_code', 'name', 'antiname', 'spin', 'color', 'mass', 'width', 'texname', 'antitexname', 'charge', 'loop_particles', 'counterterm','line', 'propagating', 'goldstoneboson']
def __init__(self, pdg_code, name, antiname, spin, color, mass, width, texname,
antitexname, charge , loop_particles=None, counterterm=None, line=None, propagating=True, goldstoneboson=False, **options):
args= (pdg_code, name, antiname, spin, color, mass, width, texname,
antitexname, float(charge))
UFOBaseClass.__init__(self, *args, **options)
global all_particles
all_particles.append(self)
self.propagating = propagating
self.goldstoneboson= goldstoneboson
self.selfconjugate = (name == antiname)
if 1: #not line:
self.line = self.find_line_type()
else:
self.line = line
def find_line_type(self):
""" find how we draw a line if not defined
valid output: dashed/straight/wavy/curly/double/swavy/scurly
"""
spin = self.spin
color = self.color
#use default
if spin == 1:
return 'dashed'
elif spin == 2:
if not self.selfconjugate:
return 'straight'
elif color == 1:
return 'swavy'
else:
return 'scurly'
elif spin == 3:
if color == 1:
return 'wavy'
else:
return 'curly'
elif spin == 5:
return 'double'
elif spin == -1:
            return 'dashed' # return 'dotted' ## not supported yet
else:
return 'dashed' # not supported yet
def anti(self):
# We do not copy the UV wavefunction renormalization as it is defined for the particle only.
if self.selfconjugate:
raise Exception('%s has no anti particle.' % self.name)
outdic = {}
for k,v in self.__dict__.iteritems():
if k not in self.require_args_all:
outdic[k] = -v
if self.color in [1,8]:
newcolor = self.color
else:
newcolor = -self.color
return Particle(-self.pdg_code, self.antiname, self.name, self.spin, newcolor, self.mass, self.width,
self.antitexname, self.texname, -self.charge, self.line, self.propagating, self.goldstoneboson, **outdic)
all_parameters = []
class Parameter(UFOBaseClass):
require_args=['name', 'nature', 'type', 'value', 'texname']
def __init__(self, name, nature, type, value, texname, lhablock=None, lhacode=None):
args = (name,nature,type,value,texname)
UFOBaseClass.__init__(self, *args)
args=(name,nature,type,value,texname)
global all_parameters
all_parameters.append(self)
if (lhablock is None or lhacode is None) and nature == 'external':
raise Exception('Need LHA information for external parameter "%s".' % name)
self.lhablock = lhablock
self.lhacode = lhacode
all_CTparameters = []
class CTParameter(UFOBaseClass):
    require_args=['name', 'nature', 'type', 'value', 'texname']
def __init__(self, name, type, value, texname):
args = (name,'internal',type,value,texname)
UFOBaseClass.__init__(self, *args)
args=(name,'internal',type,value,texname)
        self.nature='internal'
global all_CTparameters
all_CTparameters.append(self)
def finite(self):
try:
return self.value[0]
except KeyError:
return 'ZERO'
def pole(self, x):
try:
return self.value[-x]
except KeyError:
return 'ZERO'
all_vertices = []
class Vertex(UFOBaseClass):
require_args=['name', 'particles', 'color', 'lorentz', 'couplings']
def __init__(self, name, particles, color, lorentz, couplings, **opt):
args = (name, particles, color, lorentz, couplings)
UFOBaseClass.__init__(self, *args, **opt)
args=(particles,color,lorentz,couplings)
global all_vertices
all_vertices.append(self)
all_CTvertices = []
class CTVertex(UFOBaseClass):
require_args=['name', 'particles', 'color', 'lorentz', 'couplings', 'type', 'loop_particles']
def __init__(self, name, particles, color, lorentz, couplings, type, loop_particles, **opt):
args = (name, particles, color, lorentz, couplings, type, loop_particles)
UFOBaseClass.__init__(self, *args, **opt)
args=(particles,color,lorentz,couplings, type, loop_particles)
global all_CTvertices
all_CTvertices.append(self)
all_couplings = []
class Coupling(UFOBaseClass):
require_args=['name', 'value', 'order']
require_args_all=['name', 'value', 'order', 'loop_particles', 'counterterm']
def __init__(self, name, value, order, loop_particles=None, counterterm=None, **opt):
args =(name, value, order)
UFOBaseClass.__init__(self, *args, **opt)
global all_couplings
all_couplings.append(self)
def value(self):
return self.pole(0)
def pole(self, x):
""" the self.value attribute can be a dictionary directly specifying the Laurent serie using normal
parameter or just a string which can possibly contain CTparameter defining the Laurent serie."""
if isinstance(self.value,dict):
if -x in self.value.keys():
return self.value[-x]
else:
return 'ZERO'
else:
if x==0:
return self.value
else:
return 'ZERO'
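# Illustrative sketch (not part of the original model file): the two forms of
# Coupling.value that pole() above accepts. The names and expressions below
# are placeholders, not couplings of this model.
def _example_coupling_values():
    """Returns one plain coupling and one with an explicit Laurent series."""
    plain = Coupling(name='GC_example_finite',
                     value='-G',
                     order={'QCD': 1})
    # Key 0 is the finite part, key -1 the single-pole (1/eps) coefficient.
    laurent = Coupling(name='GC_example_ct',
                       value={0: '-G_UVg', -1: '-3.0*G**3/(48.0*cmath.pi**2)'},
                       order={'QCD': 3})
    return plain, laurent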
all_lorentz = []
class Lorentz(UFOBaseClass):
require_args=['name','spins','structure']
def __init__(self, name, spins, structure='external', **opt):
args = (name, spins, structure)
UFOBaseClass.__init__(self, *args, **opt)
global all_lorentz
all_lorentz.append(self)
all_functions = []
class Function(object):
def __init__(self, name, arguments, expression):
global all_functions
all_functions.append(self)
self.name = name
self.arguments = arguments
self.expr = expression
def __call__(self, *opt):
for i, arg in enumerate(self.arguments):
exec('%s = %s' % (arg, opt[i] ))
return eval(self.expr)
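# Illustrative sketch (not part of the original model file): how a Function
# object wraps a string expression into a callable. The complexconjugate
# definition mirrors the usual UFO function_library entry; whether exec/eval
# resolves the argument name this way depends on the Python runtime.
def _example_function_usage():
    """Builds and evaluates a toy UFO Function (illustration only)."""
    complexconjugate = Function(name='complexconjugate',
                                arguments=('z',),
                                expression='z.real - z.imag*1j')
    return complexconjugate(1 + 2j)  # expected to give (1-2j)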
all_orders = []
class CouplingOrder(object):
def __init__(self, name, expansion_order, hierarchy, perturbative_expansion = 0):
global all_orders
all_orders.append(self)
self.name = name
self.expansion_order = expansion_order
self.hierarchy = hierarchy
self.perturbative_expansion = perturbative_expansion
all_decays = []
class Decay(UFOBaseClass):
require_args = ['particle','partial_widths']
def __init__(self, particle, partial_widths, **opt):
args = (particle, partial_widths)
UFOBaseClass.__init__(self, *args, **opt)
global all_decays
all_decays.append(self)
# Add the information directly to the particle
particle.partial_widths = partial_widths
| 27.591463 | 198 | 0.587403 |
79451f91ab5d6a9886e191f4fa38ace98892ef39 | 1,134 | py | Python | py/py_0419_look_and_say_sequence.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0419_look_and_say_sequence.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0419_look_and_say_sequence.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 419: Look and say sequence
# https://projecteuler.net/problem=419
#
# The look and say sequence goes 1, 11, 21, 1211, 111221, 312211, 13112221,
# 1113213211, . . . The sequence starts with 1 and all other members are
# obtained by describing the previous member in terms of consecutive digits.
# It helps to do this out loud: 1 is 'one one' → 11; 11 is 'two ones' → 21;
# 21 is 'one two and one one' → 1211; 1211 is 'one one, one two and two ones'
# → 111221; 111221 is 'three ones, two twos and one one' → 312211; . . .
# Define A(n), B(n) and C(n) as the number of ones, twos and threes in the
# n'th element of the sequence respectively. One can verify that A(40) =
# 31254, B(40) = 20259 and C(40) = 11625. Find A(n), B(n) and C(n) for
# n = 10^12. Give your answer modulo 2^30 and separate your values for A, B
# and C by a comma.
# E. g. for n = 40 the answer would be 31254,20259,11625
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
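# Illustrative sketch (not the intended solution): a naive expansion of the
# look and say sequence with digit counts, workable only for small n such as
# the n = 40 check above. The n = 10^12 target needs a smarter approach (for
# example tracking Conway-style element frequencies), which this placeholder
# does not attempt.
def brute_force_counts(n):
    """Return (A(n), B(n), C(n)) for small n by literal expansion."""
    from itertools import groupby
    term = '1'
    for _ in range(n - 1):
        term = ''.join(str(len(list(g))) + digit for digit, g in groupby(term))
    return term.count('1'), term.count('2'), term.count('3')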
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 419
timed.caller(dummy, n, i, prob_id)
| 37.8 | 79 | 0.677249 |
7945210ec0b26191f18cf6abc132542dafe6fe02 | 8,359 | py | Python | python/src/iceberg/transforms.py | zzcclp/iceberg | bf582ebf68ea05be26f38d786584d474aebe048b | [
"Apache-2.0"
] | null | null | null | python/src/iceberg/transforms.py | zzcclp/iceberg | bf582ebf68ea05be26f38d786584d474aebe048b | [
"Apache-2.0"
] | 1 | 2021-11-02T00:48:17.000Z | 2021-11-02T00:48:17.000Z | python/src/iceberg/transforms.py | zzcclp/iceberg | bf582ebf68ea05be26f38d786584d474aebe048b | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import struct
from abc import ABC
from decimal import Decimal
from typing import Generic, Optional, TypeVar
from uuid import UUID
import mmh3 # type: ignore
from iceberg.types import (
BinaryType,
DateType,
DecimalType,
FixedType,
IcebergType,
IntegerType,
LongType,
StringType,
TimestampType,
TimestamptzType,
TimeType,
UUIDType,
)
from iceberg.utils.decimal import decimal_to_bytes
S = TypeVar("S")
T = TypeVar("T")
class Transform(ABC, Generic[S, T]):
"""Transform base class for concrete transforms.
A base class to transform values and project predicates on partition values.
This class is not used directly. Instead, use one of module method to create the child classes.
Args:
transform_string (str): name of the transform type
repr_string (str): string representation of a transform instance
"""
def __init__(self, transform_string: str, repr_string: str):
self._transform_string = transform_string
self._repr_string = repr_string
def __repr__(self):
return self._repr_string
def __str__(self):
return self._transform_string
def __call__(self, value: S) -> Optional[T]:
return self.apply(value)
def apply(self, value: S) -> Optional[T]:
...
def can_transform(self, source: IcebergType) -> bool:
return False
def result_type(self, source: IcebergType) -> IcebergType:
...
@property
def preserves_order(self) -> bool:
return False
def satisfies_order_of(self, other) -> bool:
return self == other
def to_human_string(self, value) -> str:
if value is None:
return "null"
return str(value)
@property
def dedup_name(self) -> str:
return self._transform_string
class BaseBucketTransform(Transform[S, int]):
"""Base Transform class to transform a value into a bucket partition value
    Transforms are parameterized by a number of buckets. Bucket partition transforms use a 32-bit
    hash of the source value and take it modulo the number of buckets to produce a positive value.
Args:
source_type (Type): An Iceberg Type of IntegerType, LongType, DecimalType, DateType, TimeType,
TimestampType, TimestamptzType, StringType, BinaryType, FixedType, UUIDType.
num_buckets (int): The number of buckets.
"""
def __init__(self, source_type: IcebergType, num_buckets: int):
super().__init__(
f"bucket[{num_buckets}]",
f"transforms.bucket(source_type={repr(source_type)}, num_buckets={num_buckets})",
)
self._num_buckets = num_buckets
@property
def num_buckets(self) -> int:
return self._num_buckets
def hash(self, value: S) -> int:
raise NotImplementedError()
def apply(self, value: S) -> Optional[int]:
if value is None:
return None
return (self.hash(value) & IntegerType.max) % self._num_buckets
def result_type(self, source: IcebergType) -> IcebergType:
return IntegerType()
class BucketNumberTransform(BaseBucketTransform):
"""Transforms a value of IntegerType, LongType, DateType, TimeType, TimestampType, or TimestamptzType
into a bucket partition value
Example:
>>> transform = BucketNumberTransform(LongType(), 100)
>>> transform.apply(81068000000)
59
"""
def can_transform(self, source: IcebergType) -> bool:
return type(source) in {IntegerType, DateType, LongType, TimeType, TimestampType, TimestamptzType}
def hash(self, value) -> int:
return mmh3.hash(struct.pack("<q", value))
class BucketDecimalTransform(BaseBucketTransform):
"""Transforms a value of DecimalType into a bucket partition value.
Example:
>>> transform = BucketDecimalTransform(DecimalType(9, 2), 100)
>>> transform.apply(Decimal("14.20"))
59
"""
def can_transform(self, source: IcebergType) -> bool:
return isinstance(source, DecimalType)
def hash(self, value: Decimal) -> int:
return mmh3.hash(decimal_to_bytes(value))
class BucketStringTransform(BaseBucketTransform):
"""Transforms a value of StringType into a bucket partition value.
Example:
>>> transform = BucketStringTransform(100)
>>> transform.apply("iceberg")
89
"""
def __init__(self, num_buckets: int):
super().__init__(StringType(), num_buckets)
def can_transform(self, source: IcebergType) -> bool:
return isinstance(source, StringType)
def hash(self, value: str) -> int:
return mmh3.hash(value)
class BucketBytesTransform(BaseBucketTransform):
"""Transforms a value of FixedType or BinaryType into a bucket partition value.
Example:
>>> transform = BucketBytesTransform(BinaryType(), 100)
>>> transform.apply(b"\\x00\\x01\\x02\\x03")
41
"""
def can_transform(self, source: IcebergType) -> bool:
return type(source) in {FixedType, BinaryType}
def hash(self, value: bytes) -> int:
return mmh3.hash(value)
class BucketUUIDTransform(BaseBucketTransform):
"""Transforms a value of UUIDType into a bucket partition value.
Example:
>>> transform = BucketUUIDTransform(100)
>>> transform.apply(UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7"))
40
"""
def __init__(self, num_buckets: int):
super().__init__(UUIDType(), num_buckets)
def can_transform(self, source: IcebergType) -> bool:
return isinstance(source, UUIDType)
def hash(self, value: UUID) -> int:
return mmh3.hash(
struct.pack(
">QQ",
(value.int >> 64) & 0xFFFFFFFFFFFFFFFF,
value.int & 0xFFFFFFFFFFFFFFFF,
)
)
class UnknownTransform(Transform):
"""A transform that represents when an unknown transform is provided
Args:
source_type (Type): An Iceberg `Type`
transform (str): A string name of a transform
Raises:
AttributeError: If the apply method is called.
"""
def __init__(self, source_type: IcebergType, transform: str):
super().__init__(
transform,
f"transforms.UnknownTransform(source_type={repr(source_type)}, transform={repr(transform)})",
)
self._type = source_type
self._transform = transform
def apply(self, value):
raise AttributeError(f"Cannot apply unsupported transform: {self}")
def can_transform(self, target: IcebergType) -> bool:
return self._type == target
def result_type(self, source: IcebergType) -> IcebergType:
return StringType()
def bucket(source_type: IcebergType, num_buckets: int) -> BaseBucketTransform:
if type(source_type) in {IntegerType, LongType, DateType, TimeType, TimestampType, TimestamptzType}:
return BucketNumberTransform(source_type, num_buckets)
elif isinstance(source_type, DecimalType):
return BucketDecimalTransform(source_type, num_buckets)
elif isinstance(source_type, StringType):
return BucketStringTransform(num_buckets)
elif isinstance(source_type, BinaryType):
return BucketBytesTransform(source_type, num_buckets)
elif isinstance(source_type, FixedType):
return BucketBytesTransform(source_type, num_buckets)
elif isinstance(source_type, UUIDType):
return BucketUUIDTransform(num_buckets)
else:
raise ValueError(f"Cannot bucket by type: {source_type}")
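# Illustrative usage of the `bucket` factory above (a sketch, not part of the
# original module; the expected values come from the doctests earlier in this file):
#
#     transform = bucket(StringType(), 100)
#     transform.apply("iceberg")             # -> 89
#     transform.can_transform(StringType())  # -> True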
| 31.307116 | 106 | 0.67867 |
794521e6159cdcab684371529ea857f68d7bfdf6 | 281 | py | Python | Hyeongdo/greedy/boj/13558.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 2 | 2021-09-27T19:10:36.000Z | 2021-11-09T05:40:39.000Z | Hyeongdo/greedy/boj/13558.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 1 | 2021-11-15T14:56:54.000Z | 2021-11-15T14:56:54.000Z | Hyeongdo/greedy/boj/13558.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | null | null | null | # 등차수열의 개수
n = int(input())
num_list = list(map(int, input().split(" ")))
count = 0
for i in range(n-2):
for j in range(i+1, n-1):
for k in range(j+1, n):
if num_list[j] - num_list[i] == num_list[k] - num_list[j]:
count += 1
print(count) | 20.071429 | 70 | 0.519573 |
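# Worked example (illustrative): for n = 5 and the sequence "1 2 3 4 5" the
# answer is 4, namely (1,2,3), (2,3,4), (3,4,5) and (1,3,5).
# The triple loop above is a brute-force O(n^3) scan over all index triples i < j < k.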
794522d7704ade598f6667b459f77e1e28e45093 | 1,670 | py | Python | tests/py3.py | ariebovenberg/gentools | 4a1f9f928c7f8b4752b69168858e83b4b23d6bcb | [
"MIT"
] | 8 | 2018-01-23T08:43:16.000Z | 2022-02-02T12:09:28.000Z | tests/py3.py | ariebovenberg/gentools | 4a1f9f928c7f8b4752b69168858e83b4b23d6bcb | [
"MIT"
] | 9 | 2018-01-21T11:31:40.000Z | 2018-03-02T18:02:53.000Z | tests/py3.py | ariebovenberg/gentools | 4a1f9f928c7f8b4752b69168858e83b4b23d6bcb | [
"MIT"
] | null | null | null | """only python3-compatible generators"""
def oneway_delegator(gen):
return (yield from gen)
def delegator(gen):
try:
return (yield from gen)
except GeneratorExit:
return
def try_until_positive(req):
"""an example relay"""
response = yield req
while response < 0:
try:
response = yield 'NOT POSITIVE!'
except GeneratorExit:
return
except ValueError:
yield 'caught ValueError'
return response
def try_until_even(req):
"""an example relay"""
response = yield req
while response % 2:
try:
response = yield 'NOT EVEN!'
except GeneratorExit:
return
except ValueError:
yield 'caught ValueError'
return response
def mymax(val):
"""an example generator function"""
while val < 100:
try:
sent = yield val
except GeneratorExit:
return
except ValueError:
sent = yield 'caught ValueError'
except TypeError:
return 'mymax: type error'
if sent > val:
val = sent
return val * 3
class MyMax:
"""an example generator iterable"""
def __init__(self, start):
self.start = start
def __iter__(self):
val = self.start
while val < 100:
try:
sent = yield val
except GeneratorExit:
return
except ValueError:
yield 'caught ValueError'
if sent > val:
val = sent
return val * 3
def emptygen():
if False:
yield
return 99
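# Illustrative usage of the helpers above (a sketch, not part of the original fixtures):
#
#     gen = delegator(mymax(10))
#     next(gen)       # -> 10
#     gen.send(42)    # -> 42
#     gen.send(7)     # -> 42; values not above the current maximum are ignored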
| 20.617284 | 44 | 0.537725 |
7945230ea92770bebbc2c9d1004bd12cce649c6c | 276 | py | Python | spikeforest2_utils/autoextractors/__init__.py | michaeljohnclancy/spikeforest2 | 93bdde2c570aef9426b3d7bceb69f3605c9f005a | [
"Apache-2.0"
] | 26 | 2020-02-03T02:12:20.000Z | 2022-03-25T09:14:32.000Z | spikeforest2_utils/autoextractors/__init__.py | michaeljohnclancy/spikeforest2 | 93bdde2c570aef9426b3d7bceb69f3605c9f005a | [
"Apache-2.0"
] | 27 | 2020-01-10T12:35:55.000Z | 2021-08-01T23:13:52.000Z | spikeforest2_utils/autoextractors/__init__.py | michaeljohnclancy/spikeforest2 | 93bdde2c570aef9426b3d7bceb69f3605c9f005a | [
"Apache-2.0"
] | 11 | 2019-02-15T15:21:47.000Z | 2021-09-23T01:07:24.000Z | from .autosortingextractor import AutoSortingExtractor
from .autorecordingextractor import AutoRecordingExtractor
from .mdaextractors import DiskReadMda, readmda, writemda32, writemda64, writemda, appendmda
from .mdaextractors import MdaRecordingExtractor, MdaSortingExtractor | 69 | 92 | 0.887681 |
79452465432b66b69b7902eb37388a0ee5b82e0e | 1,288 | py | Python | examples/functions.py | cyrilbois/PFNET.py | 81d2fd911c6e6aae4c5de0d1739c6f5361799ce2 | [
"BSD-2-Clause"
] | 3 | 2018-03-21T11:54:38.000Z | 2020-12-29T16:46:14.000Z | examples/functions.py | cyrilbois/PFNET.py | 81d2fd911c6e6aae4c5de0d1739c6f5361799ce2 | [
"BSD-2-Clause"
] | 23 | 2018-03-29T00:42:06.000Z | 2021-01-05T19:15:05.000Z | examples/functions.py | cyrilbois/PFNET.py | 81d2fd911c6e6aae4c5de0d1739c6f5361799ce2 | [
"BSD-2-Clause"
] | 5 | 2018-10-01T19:05:11.000Z | 2020-05-27T06:19:11.000Z | #***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015, Tomas Tinoco De Rubira. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
# Optimization Problems - Functions
import sys
sys.path.append('.')
import pfnet
net = pfnet.Parser(sys.argv[1]).parse(sys.argv[1])
net.set_flags('bus',
'variable',
'any',
'voltage magnitude')
print(net.num_vars == net.num_buses)
func = pfnet.Function('voltage magnitude regularization',0.3,net)
print(func.name == 'voltage magnitude regularization')
print(func.weight)
x = net.get_var_values()
func.analyze()
func.eval(x)
print(x.shape)
print(func.phi)
print(type(func.gphi), func.gphi.shape)
print(type(func.Hphi), func.Hphi.shape)
print(func.Hphi)
# Set parameters
import numpy as np
func = pfnet.Function('variable regularization', 1., net)
w = np.random.randn(net.num_vars)
x0 = np.random.randn(net.num_vars)
func.set_parameter('w', w)
func.set_parameter('x0', x0)
x = np.random.randn(net.num_vars)
func.analyze()
func.eval(x)
print(func.phi)
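# Run sketch (illustrative): the script expects a power network case file as
# its only command-line argument, e.g.
#
#     python functions.py ieee14.mat
#
# "ieee14.mat" is a hypothetical file name; any case format supported by
# pfnet.Parser should work.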
| 21.466667 | 65 | 0.562112 |
794524c9cdf895471aad644ec2867614338225d3 | 4,966 | py | Python | configs/powerline/mendeley/top_down/hrnet_w32_mendeley_256x256_udp.py | rubeea/pl_mmpose | 3b3643c66db636e8f743d0ac8f8fc14b0d5662fc | [
"Apache-2.0"
] | null | null | null | configs/powerline/mendeley/top_down/hrnet_w32_mendeley_256x256_udp.py | rubeea/pl_mmpose | 3b3643c66db636e8f743d0ac8f8fc14b0d5662fc | [
"Apache-2.0"
] | null | null | null | configs/powerline/mendeley/top_down/hrnet_w32_mendeley_256x256_udp.py | rubeea/pl_mmpose | 3b3643c66db636e8f743d0ac8f8fc14b0d5662fc | [
"Apache-2.0"
] | null | null | null | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
target_type = 'GaussianHeatMap'
channel_cfg = dict(
num_output_channels=3,
dataset_joints=3,
dataset_channel=[
[0, 1, 2],
],
inference_channel=[
0, 1, 2
])
# model settings
model = dict(
type='TopDown',
# pretrained='https://download.openmmlab.com/mmpose/'
# 'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=False,
target_type=target_type,
modulate_kernel=11,
use_udp=True))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=True,
det_bbox_thr=0.0,
bbox_file='',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
# dict(
# type='TopDownHalfBodyTransform',
# num_joints_half_body=8,
# prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='TopDownGenerateTarget',
sigma=2,
encoding='UDP',
target_type=target_type),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = '/content/pl_mmpose/data/mendeleypl'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=2),
test_dataloader=dict(samples_per_gpu=2),
train=dict(
type='MendeleyPLDataset',
ann_file=f'{data_root}/annotations/mendeleypl_train.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='MendeleyPLDataset',
ann_file=f'{data_root}/annotations/mendeleypl_test.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='MendeleyPLDataset',
ann_file=f'{data_root}/annotations/mendeleypl_test.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
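# Usage sketch (illustrative; the exact entry point depends on the MMPose
# version in use): configs like this one are normally passed to the
# framework's training script, e.g.
#
#     python tools/train.py configs/powerline/mendeley/top_down/hrnet_w32_mendeley_256x256_udp.py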
| 27.588889 | 79 | 0.585179 |
7945255b314df51ccc2a4b1d0d5d8a4c187b0407 | 1,170 | py | Python | python/getselectors.py | hunchly/hunchly-maltego | 84200e0f661a854906b4e5d7634907f8f49c9a29 | [
"Apache-2.0"
] | 5 | 2021-09-11T08:21:58.000Z | 2022-03-18T05:44:41.000Z | python/getselectors.py | hunchly/hunchly-maltego | 84200e0f661a854906b4e5d7634907f8f49c9a29 | [
"Apache-2.0"
] | 3 | 2021-09-14T16:44:47.000Z | 2021-12-20T20:30:23.000Z | python/getselectors.py | hunchly/hunchly-maltego | 84200e0f661a854906b4e5d7634907f8f49c9a29 | [
"Apache-2.0"
] | 2 | 2022-01-05T12:06:13.000Z | 2022-02-26T02:52:55.000Z | ###
# File: getselectors.py
# Authors: Divya Bhagavathiappan ([email protected]) and Justin Seitz ([email protected])
# Last modified: 2021-02-19
###
import json
from entities import *
from MaltegoTransform import *
from config import *
from subprocess import Popen, PIPE
# initialize the transform object
transform = MaltegoTransform()
transform.parseArguments(sys.argv)
# if passed a HunchlyPage we have a page_id
if transform.values.get("page_id"):
process = Popen([hunchly_api_path, 'selector', 'get', '-p', transform.values['page_id']], stdout=PIPE, stderr=PIPE, errors="ignore")
else:
process = Popen([hunchly_api_path, 'selector', 'get', '-n', transform.values['properties.hunchlycase']],
stdout=PIPE, stderr=PIPE, errors="replace")
stdout, stderr = process.communicate()
try:
result = json.loads(stdout)
if result['number_of_results'] != 0:
for result in result['selectors']:
t = HunchlySelector(result['selector'])
convert_entity(transform, t)
except:
transform.addUIMessage("Failed to retrieve results for %s" % transform.values['properties.hunchlycase'])
transform.returnOutput()
| 27.857143 | 136 | 0.705128 |
7945268c3ee2e1089e4630b6c31912879e590d2a | 3,169 | py | Python | flake8_coding.py | malfet/flake8-coding | 1106d953dde998ab6dd7f951efc804dea562d609 | [
"Apache-2.0"
] | null | null | null | flake8_coding.py | malfet/flake8-coding | 1106d953dde998ab6dd7f951efc804dea562d609 | [
"Apache-2.0"
] | null | null | null | flake8_coding.py | malfet/flake8-coding | 1106d953dde998ab6dd7f951efc804dea562d609 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
__version__ = '1.3.3'
class CodingChecker(object):
name = 'flake8_coding'
version = __version__
def __init__(self, tree, filename):
self.filename = filename
@classmethod
def add_options(cls, parser):
parser.add_option(
'--accept-encodings', default='latin-1, utf-8', action='store',
help="Acceptable source code encodings for `coding:` magic comment"
)
parser.add_option(
'--no-accept-encodings', action='store_true', parse_from_config=True,
help="Warn for files containing a `coding:` magic comment"
)
parser.add_option(
'--optional-ascii-coding', action='store_true', parse_from_config=True,
help="Do not force 'coding:' header on ascii only files"
)
if hasattr(parser, 'config_options'): # for flake8 < 3.0
parser.config_options.append('accept-encodings')
parser.config_options.append('no-accept-encodings')
parser.config_options.append('optional-ascii-coding')
@classmethod
def parse_options(cls, options):
if options.no_accept_encodings:
cls.encodings = None
else:
cls.encodings = [e.strip().lower() for e in options.accept_encodings.split(',')]
cls.optional_ascii_coding = options.optional_ascii_coding
@classmethod
def has_non_ascii_characters(cls, lines):
return any(any(ord(c) > 127 for c in line) for line in lines)
def read_lines(self):
if self.filename in ('stdin', '-', None):
try:
# flake8 >= v3.0
from flake8.engine import pep8 as stdin_utils
except ImportError:
from flake8 import utils as stdin_utils
return stdin_utils.stdin_get_value().splitlines(True)
else:
try:
import pycodestyle
except ImportError:
import pep8 as pycodestyle
return pycodestyle.readlines(self.filename)
def run(self):
try:
# PEP-263 says: a magic comment must be placed into the source
# files either as first or second line in the file
lines = self.read_lines()
if len(lines) == 0:
return
for lineno, line in enumerate(lines[:2], start=1):
matched = re.search(r'coding[:=]\s*([-\w.]+)', line, re.IGNORECASE)
if matched:
if self.encodings:
if matched.group(1).lower() not in self.encodings:
yield lineno, 0, "C102 Unknown encoding found in coding magic comment", type(self)
else:
yield lineno, 0, "C103 Coding magic comment present", type(self)
break
else:
if self.encodings:
if not self.optional_ascii_coding or self.has_non_ascii_characters(lines):
yield 1, 0, "C101 Coding magic comment not found", type(self)
except IOError:
pass
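# Usage sketch (illustrative): once installed as a flake8 plugin, this checker
# reports the codes yielded by run() above:
#   C101  coding magic comment not found
#   C102  unknown encoding found in coding magic comment
#   C103  coding magic comment present (when --no-accept-encodings is given)
# e.g.
#     flake8 --accept-encodings=utf-8,utf-16 some_module.py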
| 37.282353 | 110 | 0.568003 |
794526988de26132e4feb19b36a899e0663f79b1 | 635 | py | Python | src/models/loaders/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 3 | 2022-02-03T13:25:12.000Z | 2022-02-04T16:12:23.000Z | src/models/loaders/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | null | null | null | src/models/loaders/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 1 | 2022-02-04T12:28:02.000Z | 2022-02-04T12:28:02.000Z | from .convnext import load_convnext_parameters # noqa
from .densenet import load_densenet_parameters # noqa
from .efficientnet import load_efficientnet_parameters # noqa
from .mobilenet import load_mobilenetv2_parameters, load_mobilenetv3_parameters # noqa
from .nfnet import load_nfnet_parameters # noqa
from .regnet import load_regnet_parameters # noqa
from .resnest import load_resnest_parameters # noqa
from .resnet import load_resnet_parameters, load_resnetd_parameters # noqa
from .swin import load_swin_parameters # noqa
from .tresnet import load_tresnet_parameters # noqa
from .vit import load_vit_parameters # noqa
| 52.916667 | 87 | 0.837795 |
794526a0ea0aaf58209bf560b230c4978fff4545 | 6,616 | py | Python | .github/actions/check-version/docker-entrypoint.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | 1 | 2021-09-25T22:33:01.000Z | 2021-09-25T22:33:01.000Z | .github/actions/check-version/docker-entrypoint.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | 16 | 2021-07-20T07:09:01.000Z | 2022-03-31T07:15:34.000Z | .github/actions/check-version/docker-entrypoint.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | 3 | 2021-12-25T09:43:09.000Z | 2022-02-16T14:20:00.000Z | #!/usr/bin/env python
import json
import click
from actions_toolkit import core
class BooleanOption(click.Option):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.type = click.Choice(["true", "false", ""], case_sensitive=False)
self.callback = self._callback
self._option_parser = None
self._option_parser__match_long_opt = None
self._option = None
self._option_process = None
def _callback(self, ctx, param, value):
if value and isinstance(value, str):
value = json.loads(value.lower())
return value
def _get_value_from_state(self, opt, option, state):
if state.rargs and not any(
state.rargs[0].startswith(prefix) for prefix in self._option.prefixes
):
return state.rargs.pop(0)
def _match_long_opt(self, opt, explicit_value, state):
try:
self._option_parser__match_long_opt(opt, explicit_value, state)
except click.BadOptionUsage:
option = self._option_parser._long_opt.get(opt)
if not option:
return
if explicit_value is not None:
value = explicit_value
else:
value = self._get_value_from_state(opt, option, state)
option.process(value, state)
def _process(self, value, state):
if value is None:
value = True
elif not value:
value = None
state.opts[self._option.dest] = value
state.order.append(self._option.obj)
def add_to_parser(self, parser, ctx):
self._option_parser = parser
self._option_parser__match_long_opt = parser._match_long_opt
parser._match_long_opt = self._match_long_opt
super().add_to_parser(parser, ctx)
for name in self.opts:
option = parser._long_opt.get(name) or parser._short_opt.get(name)
if option:
self._option = option
self._option_process = option.process
option.process = self._process
break
@click.command()
@click.argument("vcs", default=core.get_input("vcs", required=False) or "any")
@click.option(
"--metadata", cls=BooleanOption, default=core.get_input("metadata", required=False)
)
@click.option(
"--no-metadata",
cls=BooleanOption,
default=core.get_input("no_metadata", required=False),
)
@click.option(
"--dirty", cls=BooleanOption, default=core.get_input("dirty", required=False)
)
@click.option(
"--tagged-metadata",
cls=BooleanOption,
default=core.get_input("tagged_metadata", required=False),
)
@click.option("--pattern", default=core.get_input("pattern", required=False))
@click.option("--format", default=core.get_input("format", required=False))
@click.option("--style", default=core.get_input("style", required=False))
@click.option(
"--latest-tag",
cls=BooleanOption,
default=core.get_input("latest_tag", required=False),
)
@click.option(
"--bump", cls=BooleanOption, default=core.get_input("bump", required=False)
)
@click.option("--tag-dir", default=core.get_input("tag_dir", required=False))
@click.option(
"--is-postrelease",
cls=BooleanOption,
default=core.get_input("is_postrelease", required=False),
)
def cli(
vcs,
metadata,
no_metadata,
dirty,
tagged_metadata,
pattern,
format,
style,
latest_tag,
bump,
tag_dir,
is_postrelease,
):
import inspect
from dunamai import Vcs, Version
from packaging.version import parse
core.start_group("Check dynamic version using `dunamai`")
kwargs = {}
if pattern:
kwargs["pattern"] = pattern
if latest_tag:
kwargs["latest_tag"] = True
if tag_dir:
kwargs["tag_dir"] = tag_dir
print(
">>> dunamai_version = Version.from_vcs(Vcs(%r)%s)"
% (
vcs,
", "
+ ", ".join(["{}={!r}".format(key, value) for key, value in kwargs.items()])
if len(kwargs)
else "",
)
)
dunamai_version = Version.from_vcs(Vcs(vcs), **kwargs)
print("Checked version: %s" % dunamai_version)
core.end_group()
core.start_group("Serialize dunamai version")
kwargs = {}
if metadata:
kwargs["metadata"] = True
if no_metadata:
kwargs["metadata"] = False
if dirty:
kwargs["dirty"] = dirty
if format:
kwargs["format"] = format
if style:
kwargs["style"] = style
if bump:
kwargs["bump"] = bump
if tagged_metadata:
kwargs["tagged_metadata"] = tagged_metadata
print(
">>> serialized_version = dunamai_version.serialize(%s)"
% ", ".join(["{}={!r}".format(key, value) for key, value in kwargs.items()])
)
serialized_version = dunamai_version.serialize(**kwargs).replace(".dev0", "")
print("Serialized version: %s" % serialized_version)
core.end_group()
core.start_group("Analyze the serialized version using `packaging.version.parse()`")
packaging_version = parse(serialized_version)
if packaging_version.is_postrelease and not is_postrelease:
print(
"Detected version is in postrelease format but prerelease format is desired"
)
print("Bumping version to be a prerelease format")
kwargs["bump"] = True
serialized_version = dunamai_version.serialize(**kwargs).replace(".dev0", "")
print(
">>> serialized_version = dunamai_version.serialize(%s)"
% ", ".join(["{}={!r}".format(key, value) for key, value in kwargs.items()])
)
print("Bumped version: %s" % serialized_version)
packaging_version = parse(serialized_version)
outputs = {}
outputs["version"] = "%s" % packaging_version
outputs["is_finalrelease"] = (
not packaging_version.is_prerelease
and not packaging_version.is_devrelease
and not packaging_version.is_postrelease
)
attributes = [
(attr, value)
for attr, value in inspect.getmembers(packaging_version)
if not attr.startswith("_")
]
for attr, value in attributes:
outputs[attr] = value
core.end_group()
core.start_group("Analyzed version attributes")
for attr, value in outputs.items():
print("{}: {}".format(attr, value))
core.end_group()
for attr, value in outputs.items():
core.set_output(attr, value)
def main():
cli() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
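# Example invocation sketch (the argument and flag names come from the click
# definitions above; the concrete values are illustrative only):
#
#     python docker-entrypoint.py git --bump true
#
# Inside the GitHub Action the inputs arrive via core.get_input(); the script
# prints the serialized version and exports each analyzed attribute through
# core.set_output().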
| 30.915888 | 88 | 0.624849 |
794526e29a31c54d2161df4921e027fe1ca84512 | 1,742 | py | Python | tests/compare_versions/Cartesian3d_check_v0.1.0.py | tdegeus/GMatElastoPlastic | ace74265f46fbc83af16d237db84d147c57598fb | [
"MIT"
] | null | null | null | tests/compare_versions/Cartesian3d_check_v0.1.0.py | tdegeus/GMatElastoPlastic | ace74265f46fbc83af16d237db84d147c57598fb | [
"MIT"
] | 15 | 2019-04-11T14:17:01.000Z | 2021-08-30T07:10:09.000Z | tests/compare_versions/Cartesian3d_check_v0.1.0.py | tdegeus/GMatElastoPlastic | ace74265f46fbc83af16d237db84d147c57598fb | [
"MIT"
] | 1 | 2019-11-12T12:09:25.000Z | 2019-11-12T12:09:25.000Z | import h5py
import numpy as np
import GMatElastoPlastic.Cartesian3d as GMat
import unittest
class Test(unittest.TestCase):
def test_main(self):
with h5py.File('Cartesian3d_random.hdf5', 'r') as data:
shape = data['/shape'][...]
i = np.eye(3)
I = np.einsum('xy,ij', np.ones(shape), i)
I4 = np.einsum('xy,ijkl->xyijkl', np.ones(shape), np.einsum('il,jk', i, i))
I4rt = np.einsum('xy,ijkl->xyijkl', np.ones(shape), np.einsum('ik,jl', i, i))
I4s = (I4 + I4rt) / 2.0
mat = GMat.Matrix(shape[0], shape[1])
I = data['/LinearHardening/I'][...]
idx = data['/LinearHardening/idx'][...]
K = data['/LinearHardening/K'][...]
G = data['/LinearHardening/G'][...]
sigy0 = data['/LinearHardening/sigy0'][...]
H = data['/LinearHardening/H'][...]
mat.setLinearHardening(I, idx, K, G, sigy0, H)
I = data['/Elastic/I'][...]
idx = data['/Elastic/idx'][...]
K = data['/Elastic/K'][...]
G = data['/Elastic/G'][...]
mat.setElastic(I, idx, K, G)
for i in range(20):
mat.increment()
GradU = data['/random/{0:d}/GradU'.format(i)][...]
Eps = np.einsum('...ijkl,...lk->...ij', I4s, GradU)
self.assertTrue(np.allclose(mat.Stress(Eps), data['/random/{0:d}/Stress'.format(i)][...]))
self.assertTrue(np.allclose(mat.Tangent(Eps)[1], data['/random/{0:d}/Tangent'.format(i)][...]))
self.assertTrue(np.allclose(mat.Epsp(), data['/random/{0:d}/Epsp'.format(i)][...]))
if __name__ == '__main__':
unittest.main()
| 32.867925 | 111 | 0.502296 |
79452b31a8805674ceabdd448ab190d4282d1be7 | 20 | py | Python | backend/fsubs/config/__init__.py | joe-eklund/foreign-subs | 1ccbe4bc705abe89374738a2b6735346a83bd2c1 | [
"MIT"
] | 1 | 2020-05-01T15:19:23.000Z | 2020-05-01T15:19:23.000Z | backend/fsubs/config/__init__.py | joe-eklund/foreign-subs | 1ccbe4bc705abe89374738a2b6735346a83bd2c1 | [
"MIT"
] | 19 | 2020-05-20T02:20:00.000Z | 2022-02-13T13:55:35.000Z | backend/fsubs/config/__init__.py | joe-eklund/foreign-subs | 1ccbe4bc705abe89374738a2b6735346a83bd2c1 | [
"MIT"
] | null | null | null | """Fsubs config."""
| 10 | 19 | 0.55 |
79452b42364e2b7bea0ef34521c7afa8c58702f4 | 5,656 | py | Python | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20190910/ModifyDomainRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20190910/ModifyDomainRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20190910/ModifyDomainRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class ModifyDomainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'ModifyDomain','waf')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IpFollowStatus(self): # Integer
return self.get_query_params().get('IpFollowStatus')
def set_IpFollowStatus(self, IpFollowStatus): # Integer
self.add_query_param('IpFollowStatus', IpFollowStatus)
def get_SniHost(self): # String
return self.get_query_params().get('SniHost')
def set_SniHost(self, SniHost): # String
self.add_query_param('SniHost', SniHost)
def get_HttpPort(self): # String
return self.get_query_params().get('HttpPort')
def set_HttpPort(self, HttpPort): # String
self.add_query_param('HttpPort', HttpPort)
def get_Http2Port(self): # String
return self.get_query_params().get('Http2Port')
def set_Http2Port(self, Http2Port): # String
self.add_query_param('Http2Port', Http2Port)
def get_WriteTime(self): # Integer
return self.get_query_params().get('WriteTime')
def set_WriteTime(self, WriteTime): # Integer
self.add_query_param('WriteTime', WriteTime)
def get_SniStatus(self): # Integer
return self.get_query_params().get('SniStatus')
def set_SniStatus(self, SniStatus): # Integer
self.add_query_param('SniStatus', SniStatus)
def get_AccessHeaderMode(self): # Integer
return self.get_query_params().get('AccessHeaderMode')
def set_AccessHeaderMode(self, AccessHeaderMode): # Integer
self.add_query_param('AccessHeaderMode', AccessHeaderMode)
def get_AccessType(self): # String
return self.get_query_params().get('AccessType')
def set_AccessType(self, AccessType): # String
self.add_query_param('AccessType', AccessType)
def get_LogHeaders(self): # String
return self.get_query_params().get('LogHeaders')
def set_LogHeaders(self, LogHeaders): # String
self.add_query_param('LogHeaders', LogHeaders)
def get_AccessHeaders(self): # String
return self.get_query_params().get('AccessHeaders')
def set_AccessHeaders(self, AccessHeaders): # String
self.add_query_param('AccessHeaders', AccessHeaders)
def get_ConnectionTime(self): # Integer
return self.get_query_params().get('ConnectionTime')
def set_ConnectionTime(self, ConnectionTime): # Integer
self.add_query_param('ConnectionTime', ConnectionTime)
def get_ClusterType(self): # Integer
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # Integer
self.add_query_param('ClusterType', ClusterType)
def get_CloudNativeInstances(self): # String
return self.get_query_params().get('CloudNativeInstances')
def set_CloudNativeInstances(self, CloudNativeInstances): # String
self.add_query_param('CloudNativeInstances', CloudNativeInstances)
def get_HttpsRedirect(self): # Integer
return self.get_query_params().get('HttpsRedirect')
def set_HttpsRedirect(self, HttpsRedirect): # Integer
self.add_query_param('HttpsRedirect', HttpsRedirect)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_SourceIps(self): # String
return self.get_query_params().get('SourceIps')
def set_SourceIps(self, SourceIps): # String
self.add_query_param('SourceIps', SourceIps)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_IsAccessProduct(self): # Integer
return self.get_query_params().get('IsAccessProduct')
def set_IsAccessProduct(self, IsAccessProduct): # Integer
self.add_query_param('IsAccessProduct', IsAccessProduct)
def get_ReadTime(self): # Integer
return self.get_query_params().get('ReadTime')
def set_ReadTime(self, ReadTime): # Integer
self.add_query_param('ReadTime', ReadTime)
def get_HttpsPort(self): # String
return self.get_query_params().get('HttpsPort')
def set_HttpsPort(self, HttpsPort): # String
self.add_query_param('HttpsPort', HttpsPort)
def get_LoadBalancing(self): # Integer
return self.get_query_params().get('LoadBalancing')
def set_LoadBalancing(self, LoadBalancing): # Integer
self.add_query_param('LoadBalancing', LoadBalancing)
def get_HttpToUserIp(self): # Integer
return self.get_query_params().get('HttpToUserIp')
def set_HttpToUserIp(self, HttpToUserIp): # Integer
self.add_query_param('HttpToUserIp', HttpToUserIp)
| 39.277778 | 79 | 0.756895 |
79452b53d1e8e8cf36e7a79627de913e9a401376 | 3,310 | py | Python | nem_bot/settings.py | TedKoba/nem_bot | 089314c8919d6750c4711b21cf8f303da44eec8b | [
"MIT"
] | null | null | null | nem_bot/settings.py | TedKoba/nem_bot | 089314c8919d6750c4711b21cf8f303da44eec8b | [
"MIT"
] | 6 | 2020-06-05T18:08:39.000Z | 2022-01-07T01:51:53.000Z | nem_bot/settings.py | TedKoba/nem_bot | 089314c8919d6750c4711b21cf8f303da44eec8b | [
"MIT"
] | null | null | null | """
Django settings for nem_bot project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from celery.schedules import crontab
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1^$e%1tx$52j_k6@)%kmep+++thbrr*(ym4mwa!stq=0p-+rvc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bot',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'nem_bot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nem_bot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
CELERY_TIMEZONE = 'UTC'
CELERYBEAT_SCHEDULE = {
'post-every-hour': {
'task': 'bot.tasks.scheduled_nem_bot',
'schedule': crontab(hour='*'),
},
}
| 25.075758 | 91 | 0.689426 |
79452c6d3313ff4182e638d9ae58acd68478d42c | 5,120 | py | Python | dvc/command/version.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | 1 | 2020-08-01T08:31:18.000Z | 2020-08-01T08:31:18.000Z | dvc/command/version.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | null | null | null | dvc/command/version.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | null | null | null | import argparse
import itertools
import logging
import os
import pathlib
import platform
import uuid
from dvc.command.base import CmdBaseNoRepo, append_doc_link
from dvc.exceptions import DvcException, NotDvcRepoError
from dvc.scm.base import SCMError
from dvc.system import System
from dvc.utils import relpath
from dvc.utils.pkg import PKG
from dvc.version import __version__
try:
import psutil
except ImportError:
psutil = None
logger = logging.getLogger(__name__)
class CmdVersion(CmdBaseNoRepo):
def run(self):
from dvc.repo import Repo
package = PKG
if PKG is None:
package = ""
else:
package = f"({PKG})"
info = [
f"DVC version: {__version__} {package}",
"---------------------------------",
f"Platform: Python {platform.python_version()} on "
f"{platform.platform()}",
f"Supports: {self.get_supported_remotes()}",
]
try:
repo = Repo()
root_directory = repo.root_dir
# cache_dir might not exist yet (e.g. after `dvc init`), and we
# can't auto-create it, as it might cause issues if the user
# later decides to enable shared cache mode with
# `dvc config cache.shared group`.
if os.path.exists(repo.cache.local.cache_dir):
info.append(
"Cache types: {}".format(
self.get_linktype_support_info(repo)
)
)
if psutil:
fs_type = self.get_fs_type(repo.cache.local.cache_dir)
info.append(f"Cache directory: {fs_type}")
else:
logger.warning(
"Unable to detect supported link types, as cache "
"directory '{}' doesn't exist. It is usually auto-created "
"by commands such as `dvc add/fetch/pull/run/import`, "
"but you could create it manually to enable this "
"check.".format(relpath(repo.cache.local.cache_dir))
)
except NotDvcRepoError:
root_directory = os.getcwd()
except SCMError:
root_directory = os.getcwd()
info.append("Repo: dvc, git (broken)")
else:
if psutil:
fs_root = self.get_fs_type(os.path.abspath(root_directory))
info.append(f"Workspace directory: {fs_root}")
info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
logger.info("\n".join(info))
return 0
@staticmethod
def get_fs_type(path):
partition = {
pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
for part in psutil.disk_partitions(all=True)
}
path = pathlib.Path(path)
for parent in itertools.chain([path], path.parents):
if parent in partition:
return partition[parent]
return ("unknown", "none")
@staticmethod
def get_linktype_support_info(repo):
links = {
"reflink": (System.reflink, None),
"hardlink": (System.hardlink, System.is_hardlink),
"symlink": (System.symlink, System.is_symlink),
}
fname = "." + str(uuid.uuid4())
src = os.path.join(repo.cache.local.cache_dir, fname)
open(src, "w").close()
dst = os.path.join(repo.root_dir, fname)
cache = []
for name, (link, is_link) in links.items():
try:
link(src, dst)
status = "supported"
if is_link and not is_link(dst):
status = "broken"
os.unlink(dst)
except DvcException:
status = "not supported"
if status == "supported":
cache.append(name)
os.remove(src)
return ", ".join(cache)
@staticmethod
def get_supported_remotes():
from dvc.tree import TREES
supported_remotes = []
for tree_cls in TREES:
if not tree_cls.get_missing_deps():
supported_remotes.append(tree_cls.scheme)
if len(supported_remotes) == len(TREES):
return "All remotes"
if len(supported_remotes) == 1:
return supported_remotes
return ", ".join(supported_remotes)
def _get_dvc_repo_info(repo):
if repo.config.get("core", {}).get("no_scm", False):
return "dvc (no_scm)"
if repo.root_dir != repo.scm.root_dir:
return "dvc (subdir), git"
return "dvc, git"
def add_parser(subparsers, parent_parser):
VERSION_HELP = (
"Display the DVC version and system/environment information."
)
version_parser = subparsers.add_parser(
"version",
parents=[parent_parser],
description=append_doc_link(VERSION_HELP, "version"),
help=VERSION_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
version_parser.set_defaults(func=CmdVersion)
| 30.117647 | 79 | 0.564844 |
79452caad83e3bea15b4ba8efe573ff0bd4f3651 | 17,507 | py | Python | sponsors/admin.py | hugovk/pythondotorg | 8a02c109590e6e81aa05f8213e7fde7a3d05858f | [
"Apache-2.0"
] | null | null | null | sponsors/admin.py | hugovk/pythondotorg | 8a02c109590e6e81aa05f8213e7fde7a3d05858f | [
"Apache-2.0"
] | 31 | 2021-07-01T14:13:46.000Z | 2022-03-25T14:17:01.000Z | sponsors/admin.py | hugovk/pythondotorg | 8a02c109590e6e81aa05f8213e7fde7a3d05858f | [
"Apache-2.0"
] | null | null | null | from ordered_model.admin import OrderedModelAdmin
from polymorphic.admin import PolymorphicInlineSupportMixin, StackedPolymorphicInline
from django.template import Context, Template
from django.contrib import admin
from django.contrib.humanize.templatetags.humanize import intcomma
from django.urls import path, reverse
from django.utils.html import mark_safe
from .models import (
SponsorshipPackage,
SponsorshipProgram,
SponsorshipBenefit,
Sponsor,
Sponsorship,
SponsorContact,
SponsorBenefit,
LegalClause,
Contract,
BenefitFeatureConfiguration,
LogoPlacementConfiguration,
TieredQuantityConfiguration,
)
from sponsors import views_admin
from sponsors.forms import SponsorshipReviewAdminForm, SponsorBenefitAdminInlineForm
from cms.admin import ContentManageableModelAdmin
@admin.register(SponsorshipProgram)
class SponsorshipProgramAdmin(OrderedModelAdmin):
ordering = ("order",)
list_display = [
"name",
"move_up_down_links",
]
class BenefitFeatureConfigurationInline(StackedPolymorphicInline):
class LogoPlacementConfigurationInline(StackedPolymorphicInline.Child):
model = LogoPlacementConfiguration
class TieredQuantityConfigurationInline(StackedPolymorphicInline.Child):
model = TieredQuantityConfiguration
model = BenefitFeatureConfiguration
child_inlines = [
LogoPlacementConfigurationInline,
TieredQuantityConfigurationInline,
]
@admin.register(SponsorshipBenefit)
class SponsorshipBenefitAdmin(PolymorphicInlineSupportMixin, OrderedModelAdmin):
change_form_template = "sponsors/admin/sponsorshipbenefit_change_form.html"
inlines = [BenefitFeatureConfigurationInline]
ordering = ("program", "order")
list_display = [
"program",
"short_name",
"package_only",
"internal_value",
"move_up_down_links",
]
list_filter = ["program", "package_only", "packages"]
search_fields = ["name"]
fieldsets = [
(
"Public",
{
"fields": (
"name",
"description",
"program",
"packages",
"package_only",
"new",
"unavailable",
),
},
),
(
"Internal",
{
"fields": (
"internal_description",
"internal_value",
"capacity",
"soft_capacity",
"legal_clauses",
"conflicts",
)
},
),
]
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/update-related-sponsorships",
self.admin_site.admin_view(self.update_related_sponsorships),
name="sponsors_sponsorshipbenefit_update_related",
),
]
return my_urls + urls
def update_related_sponsorships(self, *args, **kwargs):
return views_admin.update_related_sponsorships(self, *args, **kwargs)
@admin.register(SponsorshipPackage)
class SponsorshipPackageAdmin(OrderedModelAdmin):
ordering = ("order",)
list_display = ["name", "move_up_down_links"]
def get_readonly_fields(self, request, obj=None):
if request.user.is_superuser:
return []
return ["logo_dimension"]
class SponsorContactInline(admin.TabularInline):
model = SponsorContact
raw_id_fields = ["user"]
extra = 0
@admin.register(Sponsor)
class SponsorAdmin(ContentManageableModelAdmin):
inlines = [SponsorContactInline]
search_fields = ["name"]
class SponsorBenefitInline(admin.TabularInline):
model = SponsorBenefit
form = SponsorBenefitAdminInlineForm
fields = ["sponsorship_benefit", "benefit_internal_value"]
extra = 0
def has_add_permission(self, request, obj=None):
has_add_permission = super().has_add_permission(request, obj=obj)
match = request.resolver_match
if match.url_name == "sponsors_sponsorship_change":
sponsorship = self.parent_model.objects.get(pk=match.kwargs["object_id"])
has_add_permission = has_add_permission and sponsorship.open_for_editing
return has_add_permission
def get_readonly_fields(self, request, obj=None):
if obj and not obj.open_for_editing:
return ["sponsorship_benefit", "benefit_internal_value"]
return []
def has_delete_permission(self, request, obj=None):
if not obj:
return True
return obj.open_for_editing
@admin.register(Sponsorship)
class SponsorshipAdmin(admin.ModelAdmin):
change_form_template = "sponsors/admin/sponsorship_change_form.html"
form = SponsorshipReviewAdminForm
inlines = [SponsorBenefitInline]
search_fields = ["sponsor__name"]
list_display = [
"sponsor",
"status",
"package",
"applied_on",
"approved_on",
"start_date",
"end_date",
]
list_filter = ["status", "package"]
fieldsets = [
(
"Sponsorship Data",
{
"fields": (
"sponsor",
"status",
"package",
"for_modified_package",
"sponsorship_fee",
"get_estimated_cost",
"start_date",
"end_date",
"get_contract",
"level_name",
),
},
),
(
"Sponsor Detailed Information",
{
"fields": (
"get_sponsor_name",
"get_sponsor_description",
"get_sponsor_landing_page_url",
"get_sponsor_web_logo",
"get_sponsor_print_logo",
"get_sponsor_primary_phone",
"get_sponsor_mailing_address",
"get_sponsor_contacts",
),
},
),
(
"Events dates",
{
"fields": (
"applied_on",
"approved_on",
"rejected_on",
"finalized_on",
),
"classes": ["collapse"],
},
),
]
def get_readonly_fields(self, request, obj):
readonly_fields = [
"for_modified_package",
"sponsor",
"status",
"applied_on",
"rejected_on",
"approved_on",
"finalized_on",
"level_name",
"get_estimated_cost",
"get_sponsor_name",
"get_sponsor_description",
"get_sponsor_landing_page_url",
"get_sponsor_web_logo",
"get_sponsor_print_logo",
"get_sponsor_primary_phone",
"get_sponsor_mailing_address",
"get_sponsor_contacts",
"get_contract",
]
if obj and obj.status != Sponsorship.APPLIED:
extra = ["start_date", "end_date", "package", "level_name", "sponsorship_fee"]
readonly_fields.extend(extra)
return readonly_fields
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.select_related("sponsor")
def get_estimated_cost(self, obj):
cost = None
html = "This sponsorship has not customizations so there's no estimated cost"
if obj.for_modified_package:
msg = "This sponsorship has customizations and this cost is a sum of all benefit's internal values from when this sponsorship was created"
cost = intcomma(obj.estimated_cost)
html = f"{cost} USD <br/><b>Important: </b> {msg}"
return mark_safe(html)
get_estimated_cost.short_description = "Estimated cost"
def get_contract(self, obj):
if not obj.contract:
return "---"
url = reverse("admin:sponsors_contract_change", args=[obj.contract.pk])
html = f"<a href='{url}' target='_blank'>{obj.contract}</a>"
return mark_safe(html)
get_contract.short_description = "Contract"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/reject",
# TODO: maybe it would be better to create a specific
# group or permission to review sponsorship applications
self.admin_site.admin_view(self.reject_sponsorship_view),
name="sponsors_sponsorship_reject",
),
path(
"<int:pk>/approve-existing",
self.admin_site.admin_view(self.approve_signed_sponsorship_view),
name="sponsors_sponsorship_approve_existing_contract",
),
path(
"<int:pk>/approve",
self.admin_site.admin_view(self.approve_sponsorship_view),
name="sponsors_sponsorship_approve",
),
path(
"<int:pk>/enable-edit",
self.admin_site.admin_view(self.rollback_to_editing_view),
name="sponsors_sponsorship_rollback_to_edit",
),
]
return my_urls + urls
def get_sponsor_name(self, obj):
return obj.sponsor.name
get_sponsor_name.short_description = "Name"
def get_sponsor_description(self, obj):
return obj.sponsor.description
get_sponsor_description.short_description = "Description"
def get_sponsor_landing_page_url(self, obj):
return obj.sponsor.landing_page_url
get_sponsor_landing_page_url.short_description = "Landing Page URL"
def get_sponsor_web_logo(self, obj):
html = "{% load thumbnail %}{% thumbnail sponsor.web_logo '150x150' format='PNG' quality=100 as im %}<img src='{{ im.url}}'/>{% endthumbnail %}"
template = Template(html)
context = Context({'sponsor': obj.sponsor})
html = template.render(context)
return mark_safe(html)
get_sponsor_web_logo.short_description = "Web Logo"
def get_sponsor_print_logo(self, obj):
img = obj.sponsor.print_logo
html = ""
if img:
html = "{% load thumbnail %}{% thumbnail img '150x150' format='PNG' quality=100 as im %}<img src='{{ im.url}}'/>{% endthumbnail %}"
template = Template(html)
context = Context({'img': img})
html = template.render(context)
return mark_safe(html) if html else "---"
get_sponsor_print_logo.short_description = "Print Logo"
def get_sponsor_primary_phone(self, obj):
return obj.sponsor.primary_phone
get_sponsor_primary_phone.short_description = "Primary Phone"
def get_sponsor_mailing_address(self, obj):
sponsor = obj.sponsor
city_row = (
f"{sponsor.city} - {sponsor.get_country_display()} ({sponsor.country})"
)
if sponsor.state:
city_row = f"{sponsor.city} - {sponsor.state} - {sponsor.get_country_display()} ({sponsor.country})"
mail_row = sponsor.mailing_address_line_1
if sponsor.mailing_address_line_2:
mail_row += f" - {sponsor.mailing_address_line_2}"
html = f"<p>{city_row}</p>"
html += f"<p>{mail_row}</p>"
html += f"<p>{sponsor.postal_code}</p>"
return mark_safe(html)
get_sponsor_mailing_address.short_description = "Mailing/Billing Address"
def get_sponsor_contacts(self, obj):
html = ""
contacts = obj.sponsor.contacts.all()
primary = [c for c in contacts if c.primary]
not_primary = [c for c in contacts if not c.primary]
if primary:
html = "<b>Primary contacts</b><ul>"
html += "".join(
[f"<li>{c.name}: {c.email} / {c.phone}</li>" for c in primary]
)
html += "</ul>"
if not_primary:
html += "<b>Other contacts</b><ul>"
html += "".join(
[f"<li>{c.name}: {c.email} / {c.phone}</li>" for c in not_primary]
)
html += "</ul>"
return mark_safe(html)
get_sponsor_contacts.short_description = "Contacts"
def rollback_to_editing_view(self, request, pk):
return views_admin.rollback_to_editing_view(self, request, pk)
def reject_sponsorship_view(self, request, pk):
return views_admin.reject_sponsorship_view(self, request, pk)
def approve_sponsorship_view(self, request, pk):
return views_admin.approve_sponsorship_view(self, request, pk)
def approve_signed_sponsorship_view(self, request, pk):
return views_admin.approve_signed_sponsorship_view(self, request, pk)
@admin.register(LegalClause)
class LegalClauseModelAdmin(OrderedModelAdmin):
list_display = ["internal_name"]
@admin.register(Contract)
class ContractModelAdmin(admin.ModelAdmin):
change_form_template = "sponsors/admin/contract_change_form.html"
list_display = [
"id",
"sponsorship",
"created_on",
"last_update",
"status",
"get_revision",
"document_link",
]
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.select_related("sponsorship__sponsor")
def get_revision(self, obj):
return obj.revision if obj.is_draft else "Final"
get_revision.short_description = "Revision"
fieldsets = [
(
"Info",
{
"fields": ("get_sponsorship_url", "status", "revision"),
},
),
(
"Editable",
{
"fields": (
"sponsor_info",
"sponsor_contact",
"benefits_list",
"legal_clauses",
),
},
),
(
"Files",
{
"fields": (
"document",
"document_docx",
"signed_document",
)
},
),
(
"Activities log",
{
"fields": (
"created_on",
"last_update",
"sent_on",
),
"classes": ["collapse"],
},
),
]
def get_readonly_fields(self, request, obj):
readonly_fields = [
"status",
"created_on",
"last_update",
"sent_on",
"sponsorship",
"revision",
"document",
"document_docx",
"get_sponsorship_url",
]
if obj and not obj.is_draft:
extra = [
"sponsor_info",
"sponsor_contact",
"benefits_list",
"legal_clauses",
]
readonly_fields.extend(extra)
return readonly_fields
def document_link(self, obj):
html, url, msg = "---", "", ""
if obj.is_draft:
url = obj.preview_url
msg = "Preview document"
elif obj.document:
url = obj.document.url
msg = "Download Contract"
elif obj.signed_document:
url = obj.signed_document.url
msg = "Download Signed Contract"
if url and msg:
html = f'<a href="{url}" target="_blank">{msg}</a>'
return mark_safe(html)
document_link.short_description = "Contract document"
def get_sponsorship_url(self, obj):
if not obj.sponsorship:
return "---"
url = reverse("admin:sponsors_sponsorship_change", args=[obj.sponsorship.pk])
html = f"<a href='{url}' target='_blank'>{obj.sponsorship}</a>"
return mark_safe(html)
get_sponsorship_url.short_description = "Sponsorship"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/preview",
self.admin_site.admin_view(self.preview_contract_view),
name="sponsors_contract_preview",
),
path(
"<int:pk>/send",
self.admin_site.admin_view(self.send_contract_view),
name="sponsors_contract_send",
),
path(
"<int:pk>/execute",
self.admin_site.admin_view(self.execute_contract_view),
name="sponsors_contract_execute",
),
path(
"<int:pk>/nullify",
self.admin_site.admin_view(self.nullify_contract_view),
name="sponsors_contract_nullify",
),
]
return my_urls + urls
def preview_contract_view(self, request, pk):
return views_admin.preview_contract_view(self, request, pk)
def send_contract_view(self, request, pk):
return views_admin.send_contract_view(self, request, pk)
def execute_contract_view(self, request, pk):
return views_admin.execute_contract_view(self, request, pk)
def nullify_contract_view(self, request, pk):
return views_admin.nullify_contract_view(self, request, pk)
| 31.71558 | 152 | 0.570115 |
79452cc71e03804cc55f87ac1e7ea1860585208f | 1,492 | py | Python | e3sm_diags/parameter/__init__.py | E3SM-Project/acme_diags | 0dce677a359aff9b385fb9a6184ccefddb067f47 | [
"BSD-3-Clause"
] | 2 | 2018-05-15T02:09:07.000Z | 2018-06-06T20:35:45.000Z | e3sm_diags/parameter/__init__.py | E3SM-Project/acme_diags | 0dce677a359aff9b385fb9a6184ccefddb067f47 | [
"BSD-3-Clause"
] | 62 | 2018-04-20T20:12:25.000Z | 2019-01-17T20:07:38.000Z | e3sm_diags/parameter/__init__.py | E3SM-Project/acme_diags | 0dce677a359aff9b385fb9a6184ccefddb067f47 | [
"BSD-3-Clause"
] | 1 | 2018-05-15T02:09:02.000Z | 2018-05-15T02:09:02.000Z | from .annual_cycle_zonal_mean_parameter import ACzonalmeanParameter
from .area_mean_time_series_parameter import AreaMeanTimeSeriesParameter
from .arm_diags_parameter import ARMDiagsParameter
from .core_parameter import CoreParameter
from .diurnal_cycle_parameter import DiurnalCycleParameter
from .enso_diags_parameter import EnsoDiagsParameter
from .lat_lon_land_parameter import LatLonLandParameter
from .meridional_mean_2d_parameter import MeridionalMean2dParameter
from .qbo_parameter import QboParameter
from .streamflow_parameter import StreamflowParameter
from .tc_analysis_parameter import TCAnalysisParameter
from .zonal_mean_2d_parameter import ZonalMean2dParameter
from .zonal_mean_2d_stratosphere_parameter import ZonalMean2dStratosphereParameter
SET_TO_PARAMETERS = {
"zonal_mean_xy": CoreParameter,
"zonal_mean_2d": ZonalMean2dParameter,
"zonal_mean_2d_stratosphere": ZonalMean2dStratosphereParameter,
"meridional_mean_2d": MeridionalMean2dParameter,
"lat_lon": CoreParameter,
"polar": CoreParameter,
"cosp_histogram": CoreParameter,
"area_mean_time_series": AreaMeanTimeSeriesParameter,
"enso_diags": EnsoDiagsParameter,
"qbo": QboParameter,
"streamflow": StreamflowParameter,
"diurnal_cycle": DiurnalCycleParameter,
"arm_diags": ARMDiagsParameter,
"tc_analysis": TCAnalysisParameter,
"annual_cycle_zonal_mean": ACzonalmeanParameter,
"lat_lon_land": LatLonLandParameter,
"aerosol_aeronet": CoreParameter,
}
| 43.882353 | 82 | 0.837131 |
79452ce8045692c2f0c4028ec56e7288a7fe3638 | 3,128 | py | Python | sumoenv.py | ccwutw/rl-control | 39ec21871f3cb435841dfb4b0646bcb2cbbc79b7 | [
"MIT"
] | null | null | null | sumoenv.py | ccwutw/rl-control | 39ec21871f3cb435841dfb4b0646bcb2cbbc79b7 | [
"MIT"
] | null | null | null | sumoenv.py | ccwutw/rl-control | 39ec21871f3cb435841dfb4b0646bcb2cbbc79b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
class SumoEnv:
place_len = 7.5
place_offset = 8.50
lane_len = 10
lane_ids = ['-gneE0_0','-gneE0_1','-gneE0_2','-gneE1_0','-gneE1_1','-gneE1_2','-gneE2_0','-gneE2_1','-gneE2_2','-gneE3_0','-gneE3_1','-gneE3_2']
def __init__(self, label='default', gui_f=False):
self.label = label
self.wt_last = 0.
self.ncars = 0
#exe = 'sumo-gui.exe' if gui_f else 'sumo.exe' #WINDOWS
        exe = 'sumo-gui' if gui_f else 'sumo' #LINUX
sumoBinary = os.path.join(os.environ['SUMO_HOME'], 'bin', exe)
#sumoBinary = checkBinary('sumo')
self.sumoCmd = [sumoBinary, '-c', 'intersection.sumocfg']
return
def get_state_d(self):
state = np.zeros(self.lane_len * 12 + 4, dtype=np.float32)
for ilane in range(0, 12):
lane_id = self.lane_ids[ilane]
ncars = traci.lane.getLastStepVehicleNumber(lane_id)
cars = traci.lane.getLastStepVehicleIDs(lane_id)
for icar in cars:
xcar, ycar = traci.vehicle.getPosition(icar)
if ilane < 3:
pos = (ycar - self.place_offset) / self.place_len
elif ilane < 6:
pos = (xcar - self.place_offset) / self.place_len
elif ilane < 9:
pos = (-ycar - self.place_offset) / self.place_len
else:
pos = (-xcar - self.place_offset) / self.place_len
if pos > self.lane_len - 1.:
continue
pos = np.clip(pos, 0., self.lane_len - 1. - 1e-6)
ipos = int(pos)
state[int(ilane * self.lane_len + ipos)] += 1. - pos + ipos
state[int(ilane * self.lane_len + ipos + 1)] += pos - ipos
state[self.lane_len * 12:self.lane_len * 12+4] = np.eye(4)[traci.trafficlight.getPhase('gneJ00')]
return state
def step_d(self, action):
done = False
# traci.switch(self.label)
action = np.squeeze(action)
traci.trafficlight.setPhase('gneJ00', action)
traci.simulationStep()
traci.simulationStep()
self.ncars += traci.simulation.getDepartedNumber()
state = self.get_state_d()
wt = 0
for ilane in range(0, 12):
lane_id = self.lane_ids[ilane]
wt += traci.lane.getWaitingTime(lane_id)
reward = - (wt - self.wt_last)*0.004
if self.ncars > 250:
done = True
return state, reward, done, np.array([[reward]])
def reset(self):
self.wt_last = 0.
self.ncars = 0
traci.start(self.sumoCmd, label=self.label)
traci.trafficlight.setProgram('gneJ00', '0')
traci.simulationStep()
return self.get_state_d()
def close(self):
traci.close()
| 32.583333 | 148 | 0.560422 |
79452da2b5acc0e6a3432a246a211a80ef11e87a | 5,321 | py | Python | functions/app.py | msakasai/mm-list-channels | 0a0fa1d6fdb246abdfa008dd08d0b2e0227f38d8 | [
"MIT"
] | null | null | null | functions/app.py | msakasai/mm-list-channels | 0a0fa1d6fdb246abdfa008dd08d0b2e0227f38d8 | [
"MIT"
] | null | null | null | functions/app.py | msakasai/mm-list-channels | 0a0fa1d6fdb246abdfa008dd08d0b2e0227f38d8 | [
"MIT"
] | null | null | null | import os
import json
import requests
from jinja2 import Template
from aws_lambda_powertools import Logger
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.data_classes import EventBridgeEvent
from aws_lambda_powertools.utilities.data_classes import APIGatewayProxyEvent
logger = Logger()
@lambda_handler_decorator
def middleware_before_after(handler, event, context):
try:
# logic_before_handler_execution()
response = handler(event, context)
# logic_after_handler_execution()
return response
except Exception as e:
logger.error(e)
raise e
@middleware_before_after
def lambda_handler(event, context):
""" EventBridge Handler """
event: EventBridgeEvent = EventBridgeEvent(event)
mm_list_channels()
return 0
@middleware_before_after
def api_handler(event, context):
""" API Gateway Handler """
# print(event)
event: APIGatewayProxyEvent = APIGatewayProxyEvent(event)
mm_list_channels(event['queryStringParameters'])
return {
'statusCode': 200,
'body': '{"message": "Channel list OK."}'
}
def mm_list_channels(params: dict = {}):
# print(params)
mm_channels = MmChannels(
os.getenv('MM_TOKEN'),
os.getenv('MM_BASE_URL'),
os.getenv('MM_TEAM_ID') if not params else params['team_id'],
os.getenv('MM_POST_CHANNEL_ID') if not params else params['channel_id'],
None if not params else params['user_id'],
False if not params else True
)
logger.info(mm_channels)
mm_channels()
MM_POST_TEXT_TMPL = """
| # | channel | display_name | header | purpose |
|:-:|:--|:--|:--|:--|
{%- set ns = namespace(idx = 1) -%}
{% for c in chs %}
| {{ ns.idx }} | ~{{ c['name'] }} | {{ c['display_name'] }} | {{ c['header'] }} | {{ c['purpose'] }} |
{%- set ns.idx = ns.idx + 1 -%}
{%- endfor %}
"""
class MmChannels:
    __slots__ = [
'token',
'base_url',
'team_id',
'post_channel_id',
'user_id',
'ephemeral',
'mm_channels_api_url',
'mm_post_api_url',
'channels'
]
def __str__(self):
return f'Public channel list post. base_url: {self.base_url}, team_id: {self.team_id}, post_channel_id: {self.post_channel_id}, user_id: {self.user_id}, mm_channels_api_url: {self.mm_channels_api_url}, mm_post_api_url: {self.mm_post_api_url}'
def __init__(self, _token: str, _base_url: str, _team_id: str, _post_channel_id: str, _user_id: str, _ephemeral: bool):
self.token = _token
self.base_url = _base_url
self.team_id = _team_id
self.post_channel_id = _post_channel_id
self.user_id = _user_id
self.ephemeral = _ephemeral
self.mm_channels_api_url = f'{_base_url}/api/v4/teams/{self.team_id}/channels'
self.mm_post_api_url = f'{_base_url}/api/v4/posts' + ('/ephemeral' if self.ephemeral else '')
def channel_list(self) -> None:
_channel_list = []
headers = {
'Authorization': f'Bearer {self.token}',
'Content-Type': 'application/json'
}
page = 0
while True:
params = {'page': page, 'per_page': 10}
response = requests.get(self.mm_channels_api_url, headers=headers, params=params)
status = response.status_code
if status == 200:
_channel_list += [
{
'name': d['name'],
'display_name': d['display_name'],
'lower_display_name': d['display_name'].lower(),
'header': d['header'].replace('\n', '').replace('https://', ''),
'purpose': d['purpose'].replace('\n', '').replace('https://', ''),
} for d in response.json()]
else:
logger.error(response.json())
raise Exception(status)
if len(response.json()) < 10:
break
page += 1
self.channels = _channel_list
def sorted_channels(self) -> None:
self.channels = sorted(self.channels, key=lambda x: x['name'])
def post_text(self) -> str:
template = Template(MM_POST_TEXT_TMPL)
return template.render(chs=self.channels)
def post(self, _post_text: str) -> None:
# print(_post_text)
headers = {
'Authorization': f'Bearer {self.token}',
'Content-Type': 'application/json'
}
_params = {
'channel_id': self.post_channel_id,
'message': _post_text,
}
if self.ephemeral:
params = {
'user_id': self.user_id,
'post': _params
}
else:
params = _params
# print(params)
response = requests.post(self.mm_post_api_url, headers=headers, json=params)
if (response.status_code != 201):
logger.error(response.json())
raise Exception(response.status_code)
def __call__(self):
self.channel_list()
# print(self.channels)
self.sorted_channels()
# print(self.channels)
self.post(self.post_text())
| 30.757225 | 250 | 0.586544 |
79452dfbc3e7abe63d8b213ca5aa1eece6ac29ac | 1,710 | py | Python | app/src/domain/entity/slack.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | app/src/domain/entity/slack.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | app/src/domain/entity/slack.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | from infra import slack
class Slack(object):
def __init__(self, owner, firebase, name=None, domain=None,
channel_id=None, mention=''):
self._owner = owner
self._name = name
self._domain = domain
self._channel_id = channel_id
self._token = None
self._mention = mention
self._firebase = firebase
@property
def owner(self):
return self._owner
@property
def channel_id(self):
return self._channel_id
def update(self, token, name, domain):
self._firebase.update(name, domain)
self._firebase.update_token(token)
@property
def token(self):
if not self._token:
self._token = self._firebase.get_token()
return self._token
def is_notify(self):
return self._channel_id is not None
def get_channels(self):
channels = slack.API(self.token).get_channels_list()
return [{'name': c['name'], 'id': c['id']} for c in channels['channels']]
def notify_start(self):
text = u'''{}'s pomodoro started! {} min. {}'''.format(
self.owner.name,
self.owner.timer.pomodoro_time / 60,
self._mention)
slack.API(self.token).post_message(self.channel_id, text)
def notify_stop(self):
text = u'''{}'s pomodoro stopped!'''.format(
self.owner.name)
slack.API(self.token).post_message(self.channel_id, text)
def notify_end(self):
text = u'''{}'s pomodoro completed! {} min break. {}'''.format(
self.owner.name, self.owner.timer.break_time / 60, self._mention)
slack.API(self.token).post_message(self.channel_id, text)
| 30 | 81 | 0.605263 |
79452e7dd21ddf621f9148e46f1468a09de9f939 | 3,666 | py | Python | DjangoQuickTour/DjangoQuickTour/settings.py | mingyeh/DjangoQuickTour | 22d89135c0f708d92967039880349524c00ec461 | [
"MIT"
] | 1 | 2017-01-02T03:41:31.000Z | 2017-01-02T03:41:31.000Z | DjangoQuickTour/DjangoQuickTour/settings.py | mingyeh/DjangoQuickTour | 22d89135c0f708d92967039880349524c00ec461 | [
"MIT"
] | null | null | null | DjangoQuickTour/DjangoQuickTour/settings.py | mingyeh/DjangoQuickTour | 22d89135c0f708d92967039880349524c00ec461 | [
"MIT"
] | 2 | 2017-07-01T23:31:39.000Z | 2019-03-04T16:02:15.000Z | """
Django settings for DjangoQuickTour project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bn@(8@nb2@jn6tck7d(cz3b3@95e1-q_#-p0pws&9)5))8ip5)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
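# Illustrative, hedged override (the variable names DJANGO_SECRET_KEY / DJANGO_DEBUG are
# assumptions, not an existing convention of this project): read the secret key and the
# debug flag from the environment when set, keeping the values above as local defaults.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', str(DEBUG)) == 'True'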
# Application definition
INSTALLED_APPS = [
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoQuickTour.urls'
from django.conf import global_settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request'
]
},
},
]
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
WSGI_APPLICATION = 'DjangoQuickTour.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 27.358209 | 91 | 0.690125 |
79453186938c8e3b86ff70fccc7ee7f84d5593ed | 23,732 | py | Python | instapy/like_util.py | SeoMinbong/InstaPy | 0d180d4ed182241d3e68f0e91dcad50df72696ac | [
"MIT"
] | 1 | 2019-02-21T02:26:07.000Z | 2019-02-21T02:26:07.000Z | instapy/like_util.py | SeoMinbong/InstaPy | 0d180d4ed182241d3e68f0e91dcad50df72696ac | [
"MIT"
] | null | null | null | instapy/like_util.py | SeoMinbong/InstaPy | 0d180d4ed182241d3e68f0e91dcad50df72696ac | [
"MIT"
] | null | null | null | import re
import random
"""Module that handles the like features"""
from .util import format_number
from math import ceil
from re import findall
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
from .time_util import sleep
from .util import update_activity
from .util import add_user_to_blacklist
from .util import click_element
def get_links_from_feed(browser, amount, num_of_search, logger):
"""Fetches random number of links from feed and returns a list of links"""
browser.get('https://www.instagram.com')
# update server calls
update_activity()
sleep(2)
for i in range(num_of_search + 1):
browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
sleep(2)
# get links
link_elems = browser.find_elements_by_xpath(
"//article/div[2]/div[2]/a")
total_links = len(link_elems)
logger.info("Total of links feched for analysis: {}".format(total_links))
links = []
try:
if link_elems:
links = [link_elem.get_attribute('href') for link_elem in link_elems]
logger.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~")
for i, link in enumerate(links):
print(i, link)
logger.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~")
except BaseException as e:
logger.error("link_elems error {}".format(str(e)))
return links
def get_links_for_location(browser,
location,
amount,
logger,
media=None,
skip_top_posts=True):
"""Fetches the number of links specified
by amount and returns a list of links"""
if media is None:
# All known media types
media = ['', 'Post', 'Video']
elif media == 'Photo':
# Include posts with multiple images in it
media = ['', 'Post']
else:
# Make it an array to use it in the following part
media = [media]
browser.get('https://www.instagram.com/explore/locations/' + location)
# update server calls
update_activity()
sleep(2)
top_elements = browser.find_element_by_xpath('//main/article/div[1]')
top_posts = top_elements.find_elements_by_tag_name('a')
sleep(1)
if skip_top_posts:
main_elem = browser.find_element_by_xpath('//main/article/div[2]')
else:
main_elem = browser.find_element_by_tag_name('main')
link_elems = main_elem.find_elements_by_tag_name('a')
sleep(1)
if not link_elems: # this location does not have `Top Posts` or it really is empty..
main_elem = browser.find_element_by_xpath('//main/article/div[1]')
top_posts = []
sleep(2)
# Get links
links = get_links(browser, location, logger, media, main_elem)
filtered_links = len(links)
try_again = 0
sc_rolled = 0
nap = 1.5
put_sleep = 0
try:
while filtered_links in range(1, amount):
if sc_rolled > 100:
logger.info("Scrolled too much! ~ sleeping a bit :>")
sleep(600)
sc_rolled = 0
for i in range(3):
browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
sc_rolled += 1
update_activity()
sleep(
nap) # if not slept, and internet speed is low, instagram will only scroll one time, instead of many times you sent scroll command...
sleep(3)
links.extend(get_links(browser, location, logger, media, main_elem))
links_all = links # uniqify links while preserving order
s = set()
links = []
for i in links_all:
if i not in s:
s.add(i)
links.append(i)
if len(links) == filtered_links:
try_again += 1
nap = 3 if try_again == 1 else 5
logger.info("Insufficient amount of links ~ trying again: {}".format(try_again))
sleep(3)
if try_again > 2: # you can try again as much as you want by changing this number
if put_sleep < 1 and filtered_links <= 21:
logger.info("Cor! Did you send too many requests? ~ let's rest some")
sleep(600)
put_sleep += 1
browser.execute_script("location.reload()")
try_again = 0
sleep(10)
main_elem = (browser.find_element_by_xpath('//main/article/div[1]') if not link_elems else
browser.find_element_by_xpath('//main/article/div[2]') if skip_top_posts else
browser.find_element_by_tag_name('main'))
else:
logger.info("'{}' location POSSIBLY has less images than desired...".format(location))
break
else:
filtered_links = len(links)
try_again = 0
nap = 1.5
except:
raise
sleep(4)
return links[:amount]
def get_links_for_tag(browser,
tag,
amount,
logger,
media=None,
skip_top_posts=True):
"""Fetches the number of links specified
by amount and returns a list of links"""
if media is None:
# All known media types
media = ['', 'Post', 'Video']
elif media == 'Photo':
# Include posts with multiple images in it
media = ['', 'Post']
else:
# Make it an array to use it in the following part
media = [media]
browser.get('https://www.instagram.com/explore/tags/'
+ (tag[1:] if tag[:1] == '#' else tag))
# update server calls
update_activity()
sleep(2)
# clicking load more
body_elem = browser.find_element_by_tag_name('body')
sleep(2)
abort = True
# Get links
if skip_top_posts:
main_elem = browser.find_element_by_xpath('//main/article/div[2]')
else:
main_elem = browser.find_element_by_tag_name('main')
total_links = 0
links = []
filtered_links = 0
try_again = 0
default_load = 21 if not skip_top_posts else 12
while filtered_links < amount:
if amount >= default_load:
if filtered_links >= default_load:
for i in range(3):
browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
update_activity()
sleep(1.5)
link_elems = main_elem.find_elements_by_tag_name('a')
if not link_elems:
main_elem2 = browser.find_element_by_xpath('//main/article/div[1]')
link_elems = main_elem2.find_elements_by_tag_name('a')
total_links += len(link_elems)
try:
if link_elems:
new_links = [link_elem.get_attribute('href') for link_elem in link_elems
if link_elem and link_elem.text in media]
for new_link in new_links:
links.append(new_link)
links_all = links
s = set()
links = []
for i in links_all:
if i not in s:
s.add(i)
links.append(i)
if len(links) == filtered_links:
try_again += 1
if try_again > 1:
logger.info("This tag has less pictures than intended..")
break
else:
filtered_links = len(links)
try_again = 0
if filtered_links < default_load and amount > filtered_links:
logger.info("This tag has so less pictures than expected...")
break
else:
logger.warning("This tag does not contain a picture")
break
except BaseException as e:
logger.error("link_elems error {}".format(str(e)))
break
while (filtered_links < amount) and not abort:
amount_left = amount - filtered_links
# Average items of the right media per page loaded
new_per_page = ceil(12 * filtered_links / total_links)
if new_per_page == 0:
# Avoid division by zero
new_per_page = 1. / 12.
# Number of page load needed
new_needed = int(ceil(amount_left / new_per_page))
if new_needed > 12:
# Don't go bananas trying to get all of instagram!
new_needed = 12
for i in range(new_needed): # add images x * 12
# Keep the latest window active while loading more posts
before_load = total_links
body_elem.send_keys(Keys.END)
# update server calls
update_activity()
sleep(1)
body_elem.send_keys(Keys.HOME)
sleep(1)
link_elems = main_elem.find_elements_by_tag_name('a')
total_links = len(link_elems)
abort = (before_load == total_links)
if abort:
break
links = [link_elem.get_attribute('href') for link_elem in link_elems
if link_elem.text in media]
filtered_links = len(links)
return links[:amount]
def get_links_for_username(browser,
username,
amount,
logger,
randomize=False,
media=None):
"""Fetches the number of links specified
by amount and returns a list of links"""
if media is None:
# All known media types
media = ['', 'Post', 'Video']
elif media == 'Photo':
# Include posts with multiple images in it
media = ['', 'Post']
else:
# Make it an array to use it in the following part
media = [media]
logger.info('Getting {} image list...'.format(username))
# Get user profile page
browser.get('https://www.instagram.com/' + username)
# update server calls
update_activity()
body_elem = browser.find_element_by_tag_name('body')
try:
is_private = body_elem.find_element_by_xpath(
'//h2[@class="_kcrwx"]')
except:
logger.info('Interaction begin...')
else:
if is_private:
logger.warning('This user is private...')
return False
abort = True
try:
load_button = body_elem.find_element_by_xpath(
'//a[contains(@class, "_1cr2e _epyes")]')
except:
try:
# scroll down to load posts
for i in range(int(ceil(amount/12))):
browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
sleep(2)
except:
logger.warning(
'Load button not found, working with current images!')
else:
abort = False
body_elem.send_keys(Keys.END)
sleep(2)
# update server calls
update_activity()
else:
abort = False
body_elem.send_keys(Keys.END)
sleep(2)
click_element(browser, load_button) # load_button.click()
# update server calls
update_activity()
body_elem.send_keys(Keys.HOME)
sleep(2)
# Get Links
main_elem = browser.find_element_by_tag_name('main')
link_elems = main_elem.find_elements_by_tag_name('a')
total_links = len(link_elems)
# Check there is at least one link
if total_links == 0:
return False
links = []
filtered_links = 0
try:
if link_elems:
links = [link_elem.get_attribute('href') for link_elem in link_elems
if link_elem and link_elem.text in media]
filtered_links = len(links)
except BaseException as e:
logger.error("link_elems error {}}".format(str(e)))
if randomize:
        # Expanding the population for better random distribution
amount = amount * 5
while (filtered_links < amount) and not abort:
amount_left = amount - filtered_links
# Average items of the right media per page loaded (total links checked for not zero)
new_per_page = ceil(12 * filtered_links / total_links)
if new_per_page == 0:
# Avoid division by zero
new_per_page = 1. / 12.
# Number of page load needed
new_needed = int(ceil(amount_left / new_per_page))
if new_needed > 12:
# Don't go bananas trying to get all of instagram!
new_needed = 12
for i in range(new_needed): # add images x * 12
# Keep the latest window active while loading more posts
before_load = total_links
body_elem.send_keys(Keys.END)
# update server calls
update_activity()
sleep(1)
body_elem.send_keys(Keys.HOME)
sleep(1)
link_elems = main_elem.find_elements_by_tag_name('a')
total_links = len(link_elems)
abort = (before_load == total_links)
if abort:
break
links = [link_elem.get_attribute('href') for link_elem in link_elems
if link_elem.text in media]
filtered_links = len(links)
if randomize:
# Shuffle the population index
links = random.sample(links, filtered_links)
return links[:amount]
def check_link(browser, link, dont_like, ignore_if_contains, ignore_users, username,
like_by_followers_upper_limit, like_by_followers_lower_limit, logger):
"""
Check the given link if it is appropriate
:param browser: The selenium webdriver instance
:param link:
:param dont_like: hashtags of inappropriate phrases
:param ignore_if_contains:
:param ignore_users:
:param username:
:param like_by_followers_upper_limit:
:param like_by_followers_lower_limit:
:param logger: the logger instance
:return: tuple of
boolean: True if inappropriate,
string: the username,
boolean: True if it is video media,
string: the message if inappropriate else 'None'
"""
browser.get(link)
# update server calls
update_activity()
sleep(2)
"""Check if the Post is Valid/Exists"""
post_page = browser.execute_script(
"return window._sharedData.entry_data.PostPage")
if post_page is None:
logger.warning('Unavailable Page: {}'.format(link.encode('utf-8')))
return True, None, None, 'Unavailable Page'
"""Gets the description of the link and checks for the dont_like tags"""
graphql = 'graphql' in post_page[0]
if graphql:
media = post_page[0]['graphql']['shortcode_media']
is_video = media['is_video']
user_name = media['owner']['username']
image_text = media['edge_media_to_caption']['edges']
image_text = image_text[0]['node']['text'] if image_text else None
owner_comments = browser.execute_script('''
latest_comments = window._sharedData.entry_data.PostPage[0].graphql.shortcode_media.edge_media_to_comment.edges;
if (latest_comments === undefined) latest_comments = Array();
owner_comments = latest_comments
.filter(item => item.node.owner.username == '{}')
.map(item => item.node.text)
.reduce((item, total) => item + '\\n' + total, '');
return owner_comments;
'''.format(user_name))
else:
media = post_page[0]['media']
is_video = media['is_video']
user_name = media['owner']['username']
image_text = media['caption']
owner_comments = browser.execute_script('''
latest_comments = window._sharedData.entry_data.PostPage[0].media.comments.nodes;
if (latest_comments === undefined) latest_comments = Array();
owner_comments = latest_comments
.filter(item => item.user.username == '{}')
.map(item => item.text)
.reduce((item, total) => item + '\\n' + total, '');
return owner_comments;
'''.format(user_name))
if owner_comments == '':
owner_comments = None
"""Append owner comments to description as it might contain further tags"""
if image_text is None:
image_text = owner_comments
elif owner_comments:
image_text = image_text + '\n' + owner_comments
"""If the image still has no description gets the first comment"""
if image_text is None:
if graphql:
image_text = media['edge_media_to_comment']['edges']
image_text = image_text[0]['node']['text'] if image_text else None
else:
image_text = media['comments']['nodes']
image_text = image_text[0]['text'] if image_text else None
if image_text is None:
image_text = "No description"
logger.info('Image from: {}'.format(user_name.encode('utf-8')))
"""Find the number of followes the user has"""
if like_by_followers_upper_limit or like_by_followers_lower_limit:
userlink = 'https://www.instagram.com/' + user_name
browser.get(userlink)
# update server calls
update_activity()
sleep(1)
try:
num_followers = browser.execute_script(
"return window._sharedData.entry_data."
"ProfilePage[0].graphql.user.edge_followed_by.count")
except WebDriverException:
try:
browser.execute_script("location.reload()")
num_followers = browser.execute_script(
"return window._sharedData.entry_data."
"ProfilePage[0].graphql.user.edge_followed_by.count")
except WebDriverException:
num_followers = 'undefined'
like_by_followers_lower_limit = None
like_by_followers_upper_limit = None
browser.get(link)
# update server calls
update_activity()
sleep(1)
logger.info('Number of Followers: {}'.format(num_followers))
if like_by_followers_upper_limit and \
num_followers > like_by_followers_upper_limit:
return True, user_name, is_video, \
'Number of followers exceeds limit'
if like_by_followers_lower_limit and \
num_followers < like_by_followers_lower_limit:
return True, user_name, is_video, \
'Number of followers does not reach minimum'
logger.info('Link: {}'.format(link.encode('utf-8')))
logger.info('Description: {}'.format(image_text.encode('utf-8')))
"""Check if the user_name is in the ignore_users list"""
if (user_name in ignore_users) or (user_name == username):
return True, user_name, is_video, 'Username'
if any((word in image_text for word in ignore_if_contains)):
return False, user_name, is_video, 'None'
dont_like_regex = []
for dont_likes in dont_like:
if dont_likes.startswith("#"):
dont_like_regex.append(dont_likes + "([^\d\w]|$)")
elif dont_likes.startswith("["):
dont_like_regex.append("#" + dont_likes[1:] + "[\d\w]+([^\d\w]|$)")
elif dont_likes.startswith("]"):
dont_like_regex.append("#[\d\w]+" + dont_likes[1:] + "([^\d\w]|$)")
else:
dont_like_regex.append(
"#[\d\w]*" + dont_likes + "[\d\w]*([^\d\w]|$)")
for dont_likes_regex in dont_like_regex:
quash = re.search(dont_likes_regex, image_text, re.IGNORECASE)
if quash:
quashed = (((quash.group(0)).split('#')[1]).split(' ')[0]).split('\n')[0].encode('utf-8') # dismiss possible space and newlines
iffy = ((re.split(r'\W+', dont_likes_regex))[3] if dont_likes_regex.endswith('*([^\\d\\w]|$)') else # 'word' without format
(re.split(r'\W+', dont_likes_regex))[1] if dont_likes_regex.endswith('+([^\\d\\w]|$)') else # '[word'
(re.split(r'\W+', dont_likes_regex))[3] if dont_likes_regex.startswith('#[\\d\\w]+') else # ']word'
(re.split(r'\W+', dont_likes_regex))[1]) # '#word'
inapp_unit = 'Inappropriate! ~ contains "{}"'.format(
quashed if iffy == quashed else
'" in "'.join([str(iffy), str(quashed)]))
return True, user_name, is_video, inapp_unit
return False, user_name, is_video, 'None'
def like_image(browser, username, blacklist, logger, logfolder):
"""Likes the browser opened image"""
# fetch spans fast
spans = [x.text.lower() for x in browser.find_elements_by_xpath("//article//a[@role='button']/span")]
if 'like' in spans:
like_elem = browser.find_elements_by_xpath(
"//a[@role='button']/span[text()='Like']/..")
# sleep real quick right before clicking the element
sleep(2)
click_element(browser, like_elem[0])
# check now we have unlike instead of like
liked_elem = browser.find_elements_by_xpath(
"//a[@role='button']/span[text()='Unlike']")
if len(liked_elem) == 1:
logger.info('--> Image Liked!')
update_activity('likes')
if blacklist['enabled'] is True:
action = 'liked'
add_user_to_blacklist(
browser, username, blacklist['campaign'], action, logger, logfolder
)
sleep(2)
return True
else:
            # if like not succeeded, wait for 2 min
logger.info('--> Image was not able to get Liked! maybe blocked ?')
sleep(120)
else:
liked_elem = browser.find_elements_by_xpath(
"//a[@role='button']/span[text()='Unlike']")
if len(liked_elem) == 1:
logger.info('--> Image already liked! ')
return False
logger.info('--> Invalid Like Element!')
return False
def get_tags(browser, url):
"""Gets all the tags of the given description in the url"""
browser.get(url)
# update server calls
update_activity()
sleep(1)
graphql = browser.execute_script(
"return ('graphql' in window._sharedData.entry_data.PostPage[0])")
if graphql:
image_text = browser.execute_script(
"return window._sharedData.entry_data.PostPage[0].graphql."
"shortcode_media.edge_media_to_caption.edges[0].node.text")
else:
image_text = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].media.caption.text")
tags = findall(r'#\w*', image_text)
return tags
def get_links(browser, tag, logger, media, element):
# Get image links in scope from tags
link_elems = element.find_elements_by_tag_name('a')
sleep(2)
links = []
try:
if link_elems:
new_links = [link_elem.get_attribute('href') for link_elem in link_elems
if link_elem and link_elem.text in media]
links.extend(new_links)
else:
logger.info("'{}' tag does not contain a picture".format(tag[1:] if tag[:1] == '#' else tag))
except BaseException as e:
logger.error("link_elems error {}".format(str(e)))
return links | 36.343032 | 154 | 0.575594 |
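# Illustrative helper, not used by InstaPy itself: fetches a handful of photo links for
# a tag with an already logged-in Selenium session and logs the count, without liking
# anything. The tag name and amount are arbitrary example values.
def demo_fetch_tag_links(browser, logger, tag='nature', amount=5):
    links = get_links_for_tag(browser, tag, amount, logger, media='Photo')
    logger.info("Fetched {} links for tag '{}'".format(len(links), tag))
    return links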
794531b10fc3263ae13bf2014886f79ce3cf7b77 | 1,156 | py | Python | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python3
# Thinking process
# The animation of spanning with colors black/white/gray
# Like the ones in princeton lecture really helped me
from heapq import heappush, heappop
class Solution:
def swimInWater(self, grid):
l, heap, delta = len(grid), [(grid[0][0], 0, 0)], [(0,1),(0,-1),(1,0),(-1,0)]
for t in range(l**2):
while heap[0][0] <= t:
v, i, j = heappop(heap)
if i == j == l-1:
return t
for dx, dy in delta:
x, y = i+dx, j+dy
if 0 <= x < l and 0 <= y < l and grid[x][y] >= 0:
heappush(heap, (grid[x][y], x, y))
grid[i][j] = -1
print('#'*80)
print(f'After time {t}:')
print(f'heap = {heap}')
for g in grid:
print(' '.join(map(lambda d: f'{d:-2d}', g)))
sol = Solution()
grid = [[0,2],[1,3]]
grid = [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
grid = [[24,1,2,3,4],[0,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
print(sol.swimInWater(grid))
| 35.030303 | 85 | 0.467993 |
794532923fb7491591d8157e69d39950293a8ac4 | 689 | py | Python | antarest/study/storage/rawstudy/model/filesystem/root/input/thermal/series/series.py | AntaresSimulatorTeam/antaREST | d686d2a86a52737c211ae67f3cee591f559909f2 | [
"Apache-2.0"
] | 2 | 2020-09-30T11:40:22.000Z | 2020-11-09T09:06:30.000Z | antarest/study/storage/rawstudy/model/filesystem/root/input/thermal/series/series.py | AntaresSimulatorTeam/antaREST | d686d2a86a52737c211ae67f3cee591f559909f2 | [
"Apache-2.0"
] | 542 | 2021-01-11T13:23:47.000Z | 2022-03-31T15:38:10.000Z | antarest/study/storage/rawstudy/model/filesystem/root/input/thermal/series/series.py | AntaresSimulatorTeam/antaREST | d686d2a86a52737c211ae67f3cee591f559909f2 | [
"Apache-2.0"
] | 1 | 2020-10-01T12:18:15.000Z | 2020-10-01T12:18:15.000Z | from antarest.study.storage.rawstudy.model.filesystem.config.model import (
FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.folder_node import (
FolderNode,
)
from antarest.study.storage.rawstudy.model.filesystem.inode import TREE
from antarest.study.storage.rawstudy.model.filesystem.root.input.thermal.series.area.area import (
InputThermalSeriesArea,
)
class InputThermalSeries(FolderNode):
def build(self) -> TREE:
children: TREE = {
a: InputThermalSeriesArea(
self.context, self.config.next_file(a), area=a
)
for a in self.config.area_names()
}
return children
| 31.318182 | 98 | 0.702467 |
7945338768de8a7ffa1412ed9f7e3c1b3239888a | 10,265 | py | Python | pygls/server.py | Maxattax97/pygls | db24ee52299b8429f7b4efd82fdea995f781f3e7 | [
"Apache-2.0"
] | null | null | null | pygls/server.py | Maxattax97/pygls | db24ee52299b8429f7b4efd82fdea995f781f3e7 | [
"Apache-2.0"
] | null | null | null | pygls/server.py | Maxattax97/pygls | db24ee52299b8429f7b4efd82fdea995f781f3e7 | [
"Apache-2.0"
] | null | null | null | ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import logging
import sys
from concurrent.futures import Future, ThreadPoolExecutor
from multiprocessing.pool import ThreadPool
from re import findall
from threading import Event
from typing import Callable, Dict, List
from pygls.types import (ApplyWorkspaceEditResponse, ConfigCallbackType,
Diagnostic, MessageType, WorkspaceEdit)
from . import IS_WIN
from .protocol import LanguageServerProtocol
from .types import ConfigurationParams
from .workspace import Workspace
logger = logging.getLogger(__name__)
async def aio_readline(loop, executor, stop_event, rfile, proxy):
"""Reads data from stdin in separate thread (asynchronously)."""
while not stop_event.is_set():
# Read line
line = await loop.run_in_executor(executor, rfile.readline)
if not line:
continue
# Extract content length from line
try:
content_length = int(findall(rb'\b\d+\b', line)[0])
logger.debug('Content length: {}'.format(content_length))
except IndexError:
continue
# Throw away empty lines
while line and line.strip():
line = await loop.run_in_executor(executor, rfile.readline)
if not line:
continue
# Read body
body = await loop.run_in_executor(executor, rfile.read, content_length)
# Pass body to language server protocol
if body:
proxy(body)
class StdOutTransportAdapter:
"""Protocol adapter which overrides write method.
Write method sends data to stdout.
"""
def __init__(self, rfile, wfile):
self.rfile = rfile
self.wfile = wfile
def close(self):
self.rfile.close()
self.wfile.close()
def write(self, data):
self.wfile.write(data)
self.wfile.flush()
class Server:
"""Class that represents async server. It can be started using TCP or IO.
Args:
protocol_cls(Protocol): Protocol implementation that must be derived
from `asyncio.Protocol`
max_workers(int, optional): Number of workers for `ThreadPool` and
`ThreadPoolExecutor`
Attributes:
_max_workers(int): Number of workers for thread pool executor
_server(Server): Server object which can be used to stop the process
_stop_event(Event): Event used for stopping `aio_readline`
_thread_pool(ThreadPool): Thread pool for executing methods decorated
with `@ls.thread()` - lazy instantiated
_thread_pool_executor(ThreadPoolExecutor): Thread pool executor
passed to `run_in_executor`
- lazy instantiated
"""
def __init__(self, protocol_cls, loop=None, max_workers=2):
if not issubclass(protocol_cls, asyncio.Protocol):
raise TypeError('Protocol class should be subclass of asyncio.Protocol')
self._max_workers = max_workers
self._server = None
self._stop_event = None
self._thread_pool = None
self._thread_pool_executor = None
if IS_WIN:
asyncio.set_event_loop(asyncio.ProactorEventLoop())
else:
asyncio.set_event_loop(asyncio.SelectorEventLoop())
self.loop = loop or asyncio.get_event_loop()
try:
asyncio.get_child_watcher().attach_loop(self.loop)
except NotImplementedError:
pass
self.lsp = protocol_cls(self)
def shutdown(self):
"""Shutdown server."""
logger.info('Shutting down the server')
if self._thread_pool:
self._thread_pool.terminate()
self._thread_pool.join()
if self._thread_pool_executor:
self._thread_pool_executor.shutdown()
if self._server:
self._server.close()
self.loop.run_until_complete(self._server.wait_closed())
logger.info('Closing the event loop.')
self.loop.close()
def start_io(self, stdin=None, stdout=None):
"""Starts IO server."""
logger.info('Starting IO server')
self._stop_event = Event()
transport = StdOutTransportAdapter(stdin or sys.stdin.buffer,
stdout or sys.stdout.buffer)
self.lsp.connection_made(transport)
try:
self.loop.run_until_complete(
aio_readline(self.loop,
self.thread_pool_executor,
self._stop_event,
stdin or sys.stdin.buffer,
self.lsp.data_received))
except SystemExit:
pass
finally:
self._stop_event.set()
self.shutdown()
def start_tcp(self, host, port):
"""Starts TCP server."""
logger.info('Starting server on {}:{}'.format(host, port))
self._server = self.loop.run_until_complete(
self.loop.create_server(self.lsp, host, port)
)
try:
self.loop.run_forever()
except SystemExit:
pass
finally:
self.shutdown()
@property
def thread_pool(self) -> ThreadPool:
"""Returns thread pool instance (lazy initialization)."""
if not self._thread_pool:
self._thread_pool = ThreadPool(processes=self._max_workers)
return self._thread_pool
@property
def thread_pool_executor(self) -> ThreadPoolExecutor:
"""Returns thread pool instance (lazy initialization)."""
if not self._thread_pool_executor:
self._thread_pool_executor = \
ThreadPoolExecutor(max_workers=self._max_workers)
return self._thread_pool_executor
class LanguageServer(Server):
"""A class that represents Language server using Language Server Protocol.
This class can be extended and it can be passed as a first argument to
registered commands/features.
Args:
protocol_cls(LanguageServerProtocol): LSP or any subclass of it
max_workers(int, optional): Number of workers for `ThreadPool` and
`ThreadPoolExecutor`
"""
def __init__(self, loop=None, protocol_cls=LanguageServerProtocol, max_workers: int = 2):
if not issubclass(protocol_cls, LanguageServerProtocol):
raise TypeError('Protocol class should be subclass of LanguageServerProtocol')
super().__init__(protocol_cls, loop, max_workers)
def apply_edit(self, edit: WorkspaceEdit, label: str = None) -> ApplyWorkspaceEditResponse:
"""Sends apply edit request to the client."""
return self.lsp.apply_edit(edit, label)
def command(self, command_name: str) -> Callable:
"""Decorator used to register custom commands.
Example:
@ls.command('myCustomCommand')
def my_cmd(ls, a, b, c):
pass
"""
return self.lsp.fm.command(command_name)
def feature(self, feature_name: str, **options: Dict) -> Callable:
"""Decorator used to register LSP features.
Example:
@ls.feature('textDocument/completion', triggerCharacters=['.'])
def completions(ls, params: CompletionRequest):
return CompletionList(False, [CompletionItem("Completion 1")])
"""
return self.lsp.fm.feature(feature_name, **options)
def get_configuration(self, params: ConfigurationParams,
callback: ConfigCallbackType = None) -> Future:
"""Gets the configuration settings from the client."""
return self.lsp.get_configuration(params, callback)
def get_configuration_async(self, params: ConfigurationParams) -> asyncio.Future:
"""Gets the configuration settings from the client."""
return self.lsp.get_configuration_async(params)
def publish_diagnostics(self, doc_uri: str, diagnostics: List[Diagnostic]):
"""Sends diagnostic notification to the client."""
self.lsp.publish_diagnostics(doc_uri, diagnostics)
def send_notification(self, method: str, params: object = None) -> None:
"""Sends notification to the client."""
self.lsp.notify(method, params)
def show_message(self, message, msg_type=MessageType.Info) -> None:
"""Sends message to the client to display message."""
self.lsp.show_message(message, msg_type)
def show_message_log(self, message, msg_type=MessageType.Log) -> None:
"""Sends message to the client's output channel."""
self.lsp.show_message_log(message, msg_type)
def thread(self) -> Callable:
"""Decorator that mark function to execute it in a thread."""
return self.lsp.thread()
@property
def workspace(self) -> Workspace:
"""Returns in-memory workspace."""
return self.lsp.workspace
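# Minimal usage sketch: build a LanguageServer, register one feature and one command, and
# serve over stdio. The completion trigger and the command name are arbitrary examples;
# real handlers would return proper LSP types instead of None.
if __name__ == '__main__':
    ls = LanguageServer()
    @ls.feature('textDocument/completion', triggerCharacters=['.'])
    def completions(server, params):
        return None  # a real handler would build and return a CompletionList
    @ls.command('example.showMessage')
    def example_cmd(server, *args):
        server.show_message('example.showMessage executed')
    ls.start_io()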
| 37.057762 | 95 | 0.602825 |
794534d7f92e767dc59122429f7b5ba1e5f42ddd | 6,046 | py | Python | geoutils/utils.py | behzad89/pochas-geoutils | 45323ee5e3f47a7f11b4f50cf01f3a8cb6e56623 | [
"MIT"
] | null | null | null | geoutils/utils.py | behzad89/pochas-geoutils | 45323ee5e3f47a7f11b4f50cf01f3a8cb6e56623 | [
"MIT"
] | null | null | null | geoutils/utils.py | behzad89/pochas-geoutils | 45323ee5e3f47a7f11b4f50cf01f3a8cb6e56623 | [
"MIT"
] | null | null | null | import datetime as dt
import json
import os
import numpy as np
import numpy.ma as ma
import rioxarray
import xarray as xr
from pyproj import CRS, Proj # type: ignore
# ModisAPI.py utils:
def geometry_from_geojson(filepath: str):
with open(filepath, "r") as f:
json_obj = json.load(f)
geojson_type = json_obj["type"]
if geojson_type == "FeatureCollection":
features = json_obj["features"]
if len(features) == 0:
raise IOError(f"No features contained in {filepath}")
elif len(features) > 1:
raise IOError(
f"More than one feature contained in {filepath}, must be exactly 1"
)
feature = features[0]
ftype = feature["geometry"]["type"]
if ftype not in ["Polygon", "MultiPolygon"]:
raise IOError(
f"Feature type in {filepath} must be either Polygon or MultiPolygon"
)
return feature["geometry"]
elif geojson_type in ["Polygon", "MultiPolygon"]:
return json_obj
else:
raise IOError(
f"Feature type in {filepath} must be either FeatureCollection, Polygon or MultiPolygon"
)
"""yield successive n-sized chunks from list l"""
def chunk(l: int, n: int):
for i in range(0, len(l), n): # type: ignore
yield l[i : i + n] # type: ignore
"""assemble request URL string"""
def getSubsetURL(
url: str,
prod: str,
lat: float,
lon: float,
band: int,
sd: str,
ed: str,
ab: float,
lr: float,
):
return "".join(
[
url,
prod,
"/subset?",
"latitude=",
str(lat),
"&longitude=",
str(lon),
"&band=",
str(band),
"&startDate=",
str(sd),
"&endDate=",
str(ed),
"&kmAboveBelow=",
str(ab),
"&kmLeftRight=",
str(lr),
]
)
def convert_to_NetCDF(subsets, coords, ouput_crs, ouput_cellsize):
# Use dictionary comprehension to get some spatial metadata from the first subset in our list:
meta = {key: value for key, value in subsets[0].items() if key != "subset"}
# Iterate over the list of subsets and collect the data in a dictionary:
data = {"dates": [], "arrays": []}
for i in subsets:
for j in i["subset"]:
data["dates"].append(j["calendar_date"])
data["arrays"].append(
np.array(j["data"]).reshape(meta["nrows"], meta["ncols"])
)
# Use the metadata to make lists of x and y coordinates:
# f(ncols,nrows): n * cellsize + corner_coordinate
dtdates = [dt.datetime.strptime(d, "%Y-%m-%d") for d in data["dates"]]
xcoordinates = [float(meta["xllcorner"])] + [
i * meta["cellsize"] + float(meta["xllcorner"]) for i in range(1, meta["ncols"])
]
ycoordinates = [float(meta["yllcorner"])] + [
i * meta["cellsize"] + float(meta["yllcorner"]) for i in range(1, meta["nrows"])
]
# Make an xarray.DataArray object:
xrDataArray = xr.DataArray(
name=meta["band"],
data=np.flipud(np.dstack(data["arrays"])),
coords=[np.array(ycoordinates), np.array(xcoordinates), dtdates], # type: ignore
dims=["y", "x", "time"],
attrs=dict(units=meta["units"]),
)
# Finally, save as netCDF:
xrDataArray_T = xrDataArray.transpose("time", "y", "x")
crs = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m"
xrDataArray_CRS = xrDataArray_T.rio.write_crs(crs)
if ouput_crs != 4326:
if ouput_cellsize is None:
raise RuntimeError(f"Please determine the cell size for projection")
else:
file = xrDataArray_CRS.rio.reproject(
f"epsg:{ouput_crs}", resolution=ouput_cellsize
)
else:
file = xrDataArray_CRS.rio.reproject(f"epsg:{ouput_crs}")
    file.to_netcdf(
f"output_{coords[0]}_{coords[1]}.nc", unlimited_dims="time", engine="netcdf4"
)
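# Hypothetical, minimal payload mirroring the fields convert_to_NetCDF reads from an
# ORNL MODIS subset response (nrows, ncols, cellsize, corner coordinates, band, units and
# one dated data block); the band, units and cell size below are made-up values that only
# illustrate the expected call, not real API output.
def _example_convert_to_netcdf():
    fake_subsets = [{
        "nrows": 2, "ncols": 2, "cellsize": 463.3127,
        "xllcorner": "0.0", "yllcorner": "0.0",
        "band": "LST_Day_1km", "units": "Kelvin",
        "subset": [{"calendar_date": "2020-01-01",
                    "data": [290.0, 291.0, 292.0, 293.0]}],
    }]
    convert_to_NetCDF(fake_subsets, (46.8, 9.5), 4326, None)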
# Define the help function to be used in the main function
def extract_point(b, rc):
"""
Extract the value for the points
"""
extracted_values = [b[coord[0], coord[1]] for coord in rc]
return extracted_values
def extract_point_buffer(b, rc, s):
"""
Extract the value based on the surrounded buffer
"""
extracted_values = [
np.mean(b[coord[0] - s : coord[0] + (s + 1), coord[1] - s : coord[1] + (s + 1)])
for coord in rc
]
return extracted_values
def extract_point_buffer_mask(b, rc, s, nd):
"""
Extract the value based on the surrounded buffer and mask the nodata value in calculation
"""
extracted_values = [
np.nanmean(
ma.masked_values(
b[coord[0] - s : coord[0] + (s + 1), coord[1] - s : coord[1] + (s + 1)],
nd,
).filled(np.nan)
)
for coord in rc
]
return extracted_values
def list_files_with_absolute_paths(dirpath: str, endswith: str = None): # type: ignore
if endswith is None:
files = []
for dirname, dirnames, filenames in os.walk(dirpath):
files += [os.path.join(dirname, filename) for filename in filenames]
else:
files = []
for dirname, dirnames, filenames in os.walk(dirpath):
files += [
os.path.join(dirname, filename)
for filename in filenames
if filename.endswith(endswith)
]
return files
def list_files(dirpath: str, endswith: str = None): # type: ignore
if endswith is not None:
files = []
for dirname, dirnames, filenames in os.walk(dirpath):
files += [filename for filename in filenames if filename.endswith(endswith)]
else:
files = []
for dirname, dirnames, filenames in os.walk(dirpath):
files += [filename for filename in filenames]
return files
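# Tiny illustrative check for the point-extraction helpers above; the 2-D array and the
# (row, col) pairs are made-up values that only demonstrate the expected call pattern.
if __name__ == "__main__":
    demo_band = np.arange(25, dtype=float).reshape(5, 5)
    demo_rc = [(2, 2), (3, 3)]
    print(extract_point(demo_band, demo_rc))            # -> [12.0, 18.0]
    print(extract_point_buffer(demo_band, demo_rc, 1))  # mean of the 3x3 window around each point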
| 29.783251 | 99 | 0.573602 |
79453525ec9d5f998b11f8a71e5780fa96153157 | 405 | py | Python | zExtraLearning/get_file_encoding.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null | zExtraLearning/get_file_encoding.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null | zExtraLearning/get_file_encoding.py | talk2sunil83/UpgradLearning | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | [
"Apache-2.0"
] | null | null | null | # %%
def predict_encoding(file_path, n_lines=20):
'''Predict a file's encoding using chardet'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding']
predict_encoding("Online+Retail.csv")
| 27 | 66 | 0.671605 |
79453538a003d572ab0794e0bb23bbe7764998d8 | 149 | py | Python | jumper/entities/weapon.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | null | null | null | jumper/entities/weapon.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | null | null | null | jumper/entities/weapon.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | 1 | 2017-12-19T17:42:52.000Z | 2017-12-19T17:42:52.000Z | from jumper.entities.item import Item
class Weapon(Item):
def trigger(self, env):
pass
def get_type(self):
return 'weapon'
| 16.555556 | 37 | 0.637584 |
794535e1eb8f99345ede9c1ed4270ebc90abd556 | 5,534 | py | Python | tests/conftest.py | CK-Chaitanya/rasa_core | cf2868ab9f04527ebe8bf5ef685536f3027c16c8 | [
"Apache-2.0"
] | 1 | 2020-09-03T01:26:39.000Z | 2020-09-03T01:26:39.000Z | tests/conftest.py | farooqarahim/rasa_core | acbf97ff1923a553eadd5cf881e64c50e622ae90 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | farooqarahim/rasa_core | acbf97ff1923a553eadd5cf881e64c50e622ae90 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from builtins import str
import matplotlib
import pytest
from pytest_localserver.http import WSGIServer
from rasa_core import train, server
from rasa_core.agent import Agent
from rasa_core.channels import CollectingOutputChannel, RestInput, channel
from rasa_core.dispatcher import Dispatcher
from rasa_core.domain import Domain
from rasa_core.interpreter import RegexInterpreter
from rasa_core.nlg import TemplatedNaturalLanguageGenerator
from rasa_core.policies.ensemble import SimplePolicyEnsemble, PolicyEnsemble
from rasa_core.policies.memoization import (
Policy, MemoizationPolicy, AugmentedMemoizationPolicy)
from rasa_core.processor import MessageProcessor
from rasa_core.slots import Slot
from rasa_core.tracker_store import InMemoryTrackerStore
from rasa_core.trackers import DialogueStateTracker
from rasa_core.utils import zip_folder
matplotlib.use('Agg')
logging.basicConfig(level="DEBUG")
DEFAULT_DOMAIN_PATH = "data/test_domains/default_with_slots.yml"
DEFAULT_STORIES_FILE = "data/test_stories/stories_defaultdomain.md"
END_TO_END_STORY_FILE = "data/test_evaluations/end_to_end_story.md"
MOODBOT_MODEL_PATH = "examples/moodbot/models/dialogue"
DEFAULT_ENDPOINTS_FILE = "data/test_endpoints/example_endpoints.yml"
class CustomSlot(Slot):
def as_feature(self):
return [0.5]
class ExamplePolicy(Policy):
def __init__(self, example_arg):
pass
@pytest.fixture(scope="session")
def default_domain():
return Domain.load(DEFAULT_DOMAIN_PATH)
@pytest.fixture(scope="session")
def default_agent(default_domain):
agent = Agent(default_domain,
policies=[MemoizationPolicy()],
interpreter=RegexInterpreter(),
tracker_store=InMemoryTrackerStore(default_domain))
training_data = agent.load_data(DEFAULT_STORIES_FILE)
agent.train(training_data)
return agent
@pytest.fixture(scope="session")
def default_agent_path(default_agent, tmpdir_factory):
path = tmpdir_factory.mktemp("agent").strpath
default_agent.persist(path)
return path
@pytest.fixture
def default_dispatcher_collecting(default_nlg):
bot = CollectingOutputChannel()
return Dispatcher("my-sender", bot, default_nlg)
@pytest.fixture
def default_processor(default_domain, default_nlg):
agent = Agent(default_domain,
SimplePolicyEnsemble([AugmentedMemoizationPolicy()]),
interpreter=RegexInterpreter())
training_data = agent.load_data(DEFAULT_STORIES_FILE)
agent.train(training_data)
tracker_store = InMemoryTrackerStore(default_domain)
return MessageProcessor(agent.interpreter,
agent.policy_ensemble,
default_domain,
tracker_store,
default_nlg)
@pytest.fixture(scope="session")
def trained_moodbot_path():
train.train_dialogue_model(
domain_file="examples/moodbot/domain.yml",
stories_file="examples/moodbot/data/stories.md",
output_path=MOODBOT_MODEL_PATH,
interpreter=RegexInterpreter(),
policy_config='default_config.yml',
kwargs=None
)
return MOODBOT_MODEL_PATH
@pytest.fixture(scope="session")
def zipped_moodbot_model():
# train moodbot if necessary
policy_file = os.path.join(MOODBOT_MODEL_PATH, 'policy_metadata.json')
if not os.path.isfile(policy_file):
trained_moodbot_path()
zip_path = zip_folder(MOODBOT_MODEL_PATH)
return zip_path
@pytest.fixture(scope="session")
def moodbot_domain():
domain_path = os.path.join(MOODBOT_MODEL_PATH, 'domain.yml')
return Domain.load(domain_path)
@pytest.fixture(scope="session")
def moodbot_metadata():
return PolicyEnsemble.load_metadata(MOODBOT_MODEL_PATH)
@pytest.fixture(scope="module")
def http_app(request, core_server):
http_server = WSGIServer(application=core_server)
http_server.start()
request.addfinalizer(http_server.stop)
return http_server.url
@pytest.fixture(scope="module")
def core_server(tmpdir_factory):
model_path = tmpdir_factory.mktemp("model").strpath
agent = Agent("data/test_domains/default.yml",
policies=[AugmentedMemoizationPolicy(max_history=3)])
training_data = agent.load_data(DEFAULT_STORIES_FILE)
agent.train(training_data)
agent.persist(model_path)
loaded_agent = Agent.load(model_path,
interpreter=RegexInterpreter())
app = server.create_app(loaded_agent)
channel.register([RestInput()],
app,
agent.handle_message,
"/webhooks/")
return app
@pytest.fixture(scope="module")
def core_server_secured(default_agent):
app = server.create_app(default_agent,
auth_token="rasa",
jwt_secret="core")
channel.register([RestInput()],
app,
default_agent.handle_message,
"/webhooks/")
return app
@pytest.fixture
def default_nlg(default_domain):
return TemplatedNaturalLanguageGenerator(default_domain.templates)
@pytest.fixture
def default_tracker(default_domain):
import uuid
uid = str(uuid.uuid1())
return DialogueStateTracker(uid, default_domain.slots)
| 28.973822 | 76 | 0.722082 |
79453658a167a04ae171d9ba78119cbe2644c875 | 28,593 | py | Python | python/pyspark/sql/context.py | souo/spark | 5cdea7d1e54e751f928876ead33fc01e7e2f3437 | [
"Apache-2.0"
] | 1 | 2019-12-02T03:54:41.000Z | 2019-12-02T03:54:41.000Z | python/pyspark/sql/context.py | souo/spark | 5cdea7d1e54e751f928876ead33fc01e7e2f3437 | [
"Apache-2.0"
] | 2 | 2016-02-04T21:55:13.000Z | 2016-02-22T21:34:56.000Z | python/pyspark/sql/context.py | souo/spark | 5cdea7d1e54e751f928876ead33fc01e7e2f3437 | [
"Apache-2.0"
] | 2 | 2020-07-23T13:31:01.000Z | 2021-05-06T15:46:24.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import json
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from py4j.protocol import Py4JError
from pyspark import since
from pyspark.rdd import RDD, _prepare_for_python_RDD, ignore_unicode_prefix
from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
from pyspark.sql.types import Row, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.utils import install_exception_handler
from pyspark.sql.functions import UserDefinedFunction
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
def _monkey_patch_RDD(sqlContext):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``sqlContext.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a StructType or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sqlContext.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SQLContext(object):
"""Main entry point for Spark SQL functionality.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.registerTempTable("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row(_c0=2, _c1=2.0, _c2=False, _c3=2, _c4=0, \
time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
self._scala_SQLContext = sqlContext
_monkey_patch_RDD(self)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
if self._scala_SQLContext is None:
self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
return self._scala_SQLContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
cls(sc, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary tables and UDFs, but shared SparkContext and
table cache.
"""
jsqlContext = self._ssql_ctx.newSession()
return self.__class__(self._sc, jsqlContext)
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self._ssql_ctx.setConf(key, value)
@since(1.3)
def getConf(self, key, defaultValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set, returns defaultValue.
"""
return self._ssql_ctx.getConf(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single LongType column named `id`,
containing elements in a range from `start` to `end` (exclusive) with
step value `step`.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._ssql_ctx.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._ssql_ctx.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a lambda function as a UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it default to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param samplingRatio: lambda function
:param returnType: a :class:`DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(_c0=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(_c0=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(_c0=4)]
"""
udf = UserDefinedFunction(f, returnType, name)
self._ssql_ctx.udf().registerPython(name, udf._judf)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: StructType
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = _infer_schema(first)
if _has_nulltype(schema):
for r in data:
schema = _merge_type(schema, _infer_schema(r))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: StructType
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
@ignore_unicode_prefix
def inferSchema(self, rdd, samplingRatio=None):
"""
.. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
"""
warnings.warn("inferSchema is deprecated, please use createDataFrame instead.")
if isinstance(rdd, DataFrame):
raise TypeError("Cannot apply schema to DataFrame")
return self.createDataFrame(rdd, None, samplingRatio)
@ignore_unicode_prefix
def applySchema(self, rdd, schema):
"""
.. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
"""
warnings.warn("applySchema is deprecated, please use createDataFrame instead")
if isinstance(rdd, DataFrame):
raise TypeError("Cannot apply schema to DataFrame")
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType, but got %s" % type(schema))
return self.createDataFrame(rdd, schema)
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif isinstance(schema, StructType):
# take the first few rows to verify schema
rows = rdd.take(10)
for row in rows:
_verify_type(row, schema)
else:
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from an list or pandas.DataFrame, returns
the RDD and schema.
"""
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif isinstance(schema, StructType):
for row in data:
_verify_type(row, schema)
else:
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None):
"""
Creates a :class:`DataFrame` from an :class:`RDD` of :class:`tuple`/:class:`list`,
list or :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of :class:`Row`/:class:`tuple`/:class:`list`/:class:`dict`,
:class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`StructType` or list of column names. default None.
:param samplingRatio: the sample ratio of rows used for inferring
:return: :class:`DataFrame`
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]]).collect()) # doctest: +SKIP
[Row(0=1, 1=2)]
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data, schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(data, schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self)
df._schema = schema
return df
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
if (df.__class__ is DataFrame):
self._ssql_ctx.registerDataFrameAsTable(df._jdf, tableName)
else:
raise ValueError("Can only register DataFrame as table")
def parquetFile(self, *paths):
"""Loads a Parquet file, returning the result as a :class:`DataFrame`.
.. note:: Deprecated in 1.4, use :func:`DataFrameReader.parquet` instead.
>>> sqlContext.parquetFile('python/test_support/sql/parquet_partitioned').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
warnings.warn("parquetFile is deprecated. Use read.parquet() instead.")
gateway = self._sc._gateway
jpaths = gateway.new_array(gateway.jvm.java.lang.String, len(paths))
for i in range(0, len(paths)):
jpaths[i] = paths[i]
jdf = self._ssql_ctx.parquetFile(jpaths)
return DataFrame(jdf, self)
def jsonFile(self, path, schema=None, samplingRatio=1.0):
"""Loads a text file storing one JSON object per line as a :class:`DataFrame`.
.. note:: Deprecated in 1.4, use :func:`DataFrameReader.json` instead.
>>> sqlContext.jsonFile('python/test_support/sql/people.json').dtypes
[('age', 'bigint'), ('name', 'string')]
"""
warnings.warn("jsonFile is deprecated. Use read.json() instead.")
if schema is None:
df = self._ssql_ctx.jsonFile(path, samplingRatio)
else:
scala_datatype = self._ssql_ctx.parseDataType(schema.json())
df = self._ssql_ctx.jsonFile(path, scala_datatype)
return DataFrame(df, self)
@ignore_unicode_prefix
@since(1.0)
def jsonRDD(self, rdd, schema=None, samplingRatio=1.0):
"""Loads an RDD storing one JSON object per string as a :class:`DataFrame`.
If the schema is provided, applies the given schema to this JSON dataset.
Otherwise, it samples the dataset with ratio ``samplingRatio`` to determine the schema.
>>> df1 = sqlContext.jsonRDD(json)
>>> df1.first()
Row(field1=1, field2=u'row1', field3=Row(field4=11, field5=None), field6=None)
>>> df2 = sqlContext.jsonRDD(json, df1.schema)
>>> df2.first()
Row(field1=1, field2=u'row1', field3=Row(field4=11, field5=None), field6=None)
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("field2", StringType()),
... StructField("field3",
... StructType([StructField("field5", ArrayType(IntegerType()))]))
... ])
>>> df3 = sqlContext.jsonRDD(json, schema)
>>> df3.first()
Row(field2=u'row1', field3=Row(field5=None))
"""
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = rdd.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._jvm.BytesToString())
if schema is None:
df = self._ssql_ctx.jsonRDD(jrdd.rdd(), samplingRatio)
else:
scala_datatype = self._ssql_ctx.parseDataType(schema.json())
df = self._ssql_ctx.jsonRDD(jrdd.rdd(), scala_datatype)
return DataFrame(df, self)
def load(self, path=None, source=None, schema=None, **options):
"""Returns the dataset in a data source as a :class:`DataFrame`.
.. note:: Deprecated in 1.4, use :func:`DataFrameReader.load` instead.
"""
warnings.warn("load is deprecated. Use read.load() instead.")
return self.read.load(path, source, schema, **options)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
if path is not None:
options["path"] = path
if source is None:
source = self.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
if schema is None:
df = self._ssql_ctx.createExternalTable(tableName, source, options)
else:
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
scala_datatype = self._ssql_ctx.parseDataType(schema.json())
df = self._ssql_ctx.createExternalTable(tableName, source, scala_datatype,
options)
return DataFrame(df, self)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._ssql_ctx.sql(sqlQuery), self)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._ssql_ctx.table(tableName), self)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("db")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param hiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
"""
def __init__(self, sparkContext, hiveContext=None):
SQLContext.__init__(self, sparkContext)
if hiveContext:
self._scala_HiveContext = hiveContext
@property
def _ssql_ctx(self):
try:
if not hasattr(self, '_scala_HiveContext'):
self._scala_HiveContext = self._get_hive_ctx()
return self._scala_HiveContext
except Py4JError as e:
raise Exception("You must build Spark with Hive. "
"Export 'SPARK_HIVE=true' and run "
"build/sbt assembly", e)
def _get_hive_ctx(self):
return self._jvm.HiveContext(self._jsc.sc())
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| 38.022606 | 100 | 0.612667 |
79453709975a0c8c854bcb9382a6c064360bf829 | 4,204 | py | Python | internal/notes/builtin-SAVE/packages/qbox/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | ["BSD-3-Clause"] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/qbox/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | ["BSD-3-Clause"] | null | null | null | internal/notes/builtin-SAVE/packages/qbox/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | ["BSD-3-Clause"] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import shutil
from spack import *
class Qbox(MakefilePackage):
"""Qbox is a C++/MPI scalable parallel implementation of first-principles
molecular dynamics (FPMD) based on the plane-wave, pseudopotential
formalism. Qbox is designed for operation on large parallel computers."""
homepage = "http://qboxcode.org/"
url = "http://qboxcode.org/download/qbox-1.63.7.tgz"
version('1.63.7', '6b0cf5656f816a1a59e22b268387af33')
version('1.63.5', 'da3161ab6a455793f2133dd03c04077c')
version('1.63.4', '8596f32c8fb7e7baa96571c655aaee07')
version('1.63.2', '55e8f13f37c8e1f43ca831791e3af1da')
version('1.63.0', '1436c884c553ab026b7f787307e5e6ed')
version('1.62.3', 'f07e038ab92b85956794e91a40346dbf')
version('1.60.9', '53b0df612e56bb65e8220d9d9dc8d395')
version('1.60.4', '060846c9fa03b1f3d7d5ce24a9491de2')
version('1.60.0', '3856cdc38a27dc17705844c4b9241a21')
version('1.58.0', 'ec0e6b79fa0ed06742212b1142e36b6a')
version('1.56.2', 'df7a4217d059a5d351d92e480ed14112')
version('1.54.4', '8f1a23af7d871317de93810b664ad3aa')
version('1.54.2', 'aeefee061255dbb36ca7e49378f63ad5')
version('1.52.3', '1862f2b2056cdf49ec4f746d45a7f1a6')
version('1.52.2', 'e406deb4c46176f1c15c226868bf61e2')
version('1.50.4', 'b06ff877257884e4fac321fb5a486266')
version('1.50.2', '171660b1bb5e57637f019fef055fb764')
version('1.50.1', '1da528b39da134f86f134432e8fada79')
version('1.47.0', '86f402651d440e05adc94168d6105da7')
version('1.45.3', '73b99a73dcbb1b5be9f66f3284750205')
version('1.45.1', '59e0c2583769b7586981c0d6ffa1b267')
version('1.45.0', '2c5bfbadfffd330c8c2fe294a10a08e4')
version('1.44.0', 'c46a2f0f68fe9229aa77779da188cea9')
depends_on('mpi')
depends_on('blas')
depends_on('scalapack')
depends_on('fftw')
depends_on('xerces-c')
build_directory = 'src'
def edit(self, spec, prefix):
with open('src/spack.mk', 'w') as mkfile:
mkfile.write('CXX = {0}\n'.format(spec['mpi'].mpicxx))
mkfile.write('LD = $(CXX)\n')
qbox_libs = spec['fftw'].libs + spec['xerces-c'].libs + \
spec['scalapack'].libs + spec['blas'].libs
mkfile.write('LDFLAGS = {0}\n'.format(qbox_libs.ld_flags))
mkfile.write('DFLAGS = {0}\n'.format(' -D'.join((
'',
'_LARGEFILE_SOURCE', 'USE_MPI', 'USE_XERCES',
'XERCESC_3', 'MPICH_IGNORE_CXX_SEEK', 'SCALAPACK',
'USE_FFTW3', 'FFTWMEASURE', 'FFTW3_2D', 'ADD_',
))))
mkfile.write('CXXFLAGS = {0}\n'.format(' '.join((
'-g', '-O3', '$(DFLAGS)',
))))
filter_file('$(TARGET)', 'spack', 'src/Makefile', string=True)
def install(self, spec, prefix):
mkdir(prefix.src)
install('src/qb', prefix.src)
shutil.move('test', prefix)
shutil.move('xml', prefix)
shutil.move('util', prefix)
| 44.723404 | 78 | 0.651998 |
794537e8bb21535241ebfd4b553b51fc13805c99 | 2,023 | py | Python | pylot/control/flags.py | alvkao58/pylot | ab49647236fcbc8aa08ec9650e0596e778e9ef85 | ["Apache-2.0"] | null | null | null | pylot/control/flags.py | alvkao58/pylot | ab49647236fcbc8aa08ec9650e0596e778e9ef85 | ["Apache-2.0"] | null | null | null | pylot/control/flags.py | alvkao58/pylot | ab49647236fcbc8aa08ec9650e0596e778e9ef85 | ["Apache-2.0"] | null | null | null |
from absl import flags
# PID controller parameters.
flags.DEFINE_float('pid_p', 0.25, 'PID p parameter')
flags.DEFINE_float('pid_i', 0.20, 'PID i parameter')
flags.DEFINE_float('pid_d', 0.0, 'PID d parameter')
# Agent stopping configs.
flags.DEFINE_bool('stop_for_traffic_lights', True,
'True to enable traffic light stopping')
flags.DEFINE_bool('stop_for_people', True, 'True to enable person stopping')
flags.DEFINE_bool('stop_for_vehicles', True, 'True to enable vehicle stopping')
# Agent stopping parameters.
flags.DEFINE_integer('traffic_light_min_dist_thres', 5,
'Min distance threshold traffic light')
flags.DEFINE_integer('traffic_light_max_dist_thres', 20,
'Max distance threshold traffic light')
flags.DEFINE_float('traffic_light_angle_thres', 0.5,
'Traffic light angle threshold')
flags.DEFINE_integer('vehicle_distance_thres', 15,
'Vehicle distance threshold')
flags.DEFINE_float('vehicle_angle_thres', 0.4, 'Vehicle angle threshold')
flags.DEFINE_float('person_angle_hit_thres', 0.15,
'Person hit zone angle threshold')
flags.DEFINE_integer('person_distance_emergency_thres', 12,
'Person emergency zone distance threshold')
flags.DEFINE_float('person_angle_emergency_thres', 0.5,
'Person emergency zone angle threshold')
flags.DEFINE_integer('person_distance_hit_thres', 35,
'Person hit zone distance threshold')
# Steering control parameters
flags.DEFINE_float('default_throttle', 0.0, 'Default throttle')
flags.DEFINE_float('throttle_max', 0.75, 'Max throttle')
flags.DEFINE_integer(
'target_speed', 10,
'Target speed in m/s , could be controlled by the speed limit')
flags.DEFINE_float('steer_gain', 0.7, 'Gain on computed steering angle')
flags.DEFINE_float('brake_strength', 1,
'Strength for applying brake; between 0 and 1')
flags.DEFINE_integer('coast_factor', 2, 'Factor to control coasting')
| 50.575 | 79 | 0.714286 |
794538468230b90f2faecdc3dd5c11688193ea3c | 11,441 | py | Python | bentoml/_internal/runner/utils.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | ["Apache-2.0"] | null | null | null | bentoml/_internal/runner/utils.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | ["Apache-2.0"] | null | null | null | bentoml/_internal/runner/utils.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | ["Apache-2.0"] | null | null | null |
import os
import re
import math
import ctypes
import typing as t
import logging
import itertools
from typing import TYPE_CHECKING
from functools import lru_cache
from simple_di.providers import SingletonFactory
from ...exceptions import BentoMLException
logger = logging.getLogger(__name__)
# Some constants taken from cuda.h
if TYPE_CHECKING:
from ctypes import c_int
from ctypes import c_char_p
from ctypes import c_size_t
from ctypes import c_void_p
CDataType = t.Union[c_int, c_void_p, c_size_t, c_char_p]
from aiohttp import MultipartWriter
from starlette.requests import Request
from ..runner.container import Payload
class PlcType(t.TypedDict):
err: c_char_p
device: c_int
num_gpus: c_int
context: c_void_p
free_mem: c_size_t
total_mem: c_size_t
T = t.TypeVar("T")
To = t.TypeVar("To")
CUDA_SUCCESS = 0
class Params(t.Generic[T]):
def __init__(self, *args: T, **kwargs: T):
self.args: t.Tuple[T, ...] = args
self.kwargs: t.Dict[str, T] = kwargs
def map(self, function: t.Callable[[T], To]) -> "Params[To]":
args = tuple(function(a) for a in self.args)
kwargs = {k: function(v) for k, v in self.kwargs.items()}
return Params[To](*args, **kwargs)
def imap(
self, function: t.Callable[[T], t.Iterable[To]]
) -> "t.Iterator[Params[To]]":
args_iter = tuple(iter(function(a)) for a in self.args)
kwargs_iter = {k: iter(function(v)) for k, v in self.kwargs.items()}
try:
while True:
args = tuple(next(a) for a in args_iter)
kwargs = {k: next(v) for k, v in kwargs_iter.items()}
yield Params[To](*args, **kwargs)
except StopIteration:
pass
def items(self) -> t.Iterator[t.Tuple[t.Union[int, str], T]]:
return itertools.chain(enumerate(self.args), self.kwargs.items())
@classmethod
def agg(
cls,
params_list: t.Sequence["Params[T]"],
agg_func: t.Callable[[t.Sequence[T]], To] = lambda i: i,
) -> "Params[To]":
if not params_list:
return t.cast(Params[To], [])
args: t.List[To] = []
kwargs: t.Dict[str, To] = {}
for j, _ in enumerate(params_list[0].args):
arg: t.List[T] = []
for params in params_list:
arg.append(params.args[j])
args.append(agg_func(arg))
for k in params_list[0].kwargs:
kwarg: t.List[T] = []
for params in params_list:
kwarg.append(params.kwargs[k])
kwargs[k] = agg_func(kwarg)
return Params(*tuple(args), **kwargs)
@property
def sample(self) -> T:
if self.args:
return self.args[0]
return next(iter(self.kwargs.values()))
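# Illustrative sketch of how Params is typically used (behaviour assumed from
# the methods above, not an excerpt of the original module):
#   Params(1, 2, x=3).map(lambda v: v * 10)            # -> Params(10, 20, x=30)
#   Params.agg([Params(1, x=2), Params(3, x=4)], sum)  # -> Params(4, x=6)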
PAYLOAD_META_HEADER = "Bento-Payload-Meta"
def payload_params_to_multipart(params: Params["Payload"]) -> "MultipartWriter":
import json
from multidict import CIMultiDict
from aiohttp.multipart import MultipartWriter
multipart = MultipartWriter(subtype="form-data")
for key, payload in params.items():
multipart.append(
payload.data,
headers=CIMultiDict(
(
(PAYLOAD_META_HEADER, json.dumps(payload.meta)),
("Content-Disposition", f'form-data; name="{key}"'),
)
),
)
return multipart
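# multipart_to_payload_params below is the inverse of this function: positional
# arguments travel as numeric form-field names, keyword arguments under their
# own key, with each payload's metadata carried in the PAYLOAD_META_HEADER.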
async def multipart_to_payload_params(request: "Request") -> Params["Payload"]:
import json
from bentoml._internal.runner.container import Payload
from bentoml._internal.utils.formparser import populate_multipart_requests
parts = await populate_multipart_requests(request)
max_arg_index = -1
kwargs: t.Dict[str, Payload] = {}
args_map: t.Dict[int, Payload] = {}
for field_name, req in parts.items():
payload = Payload(
data=await req.body(),
meta=json.loads(req.headers[PAYLOAD_META_HEADER]),
)
if field_name.isdigit():
arg_index = int(field_name)
args_map[arg_index] = payload
max_arg_index = max(max_arg_index, arg_index)
else:
kwargs[field_name] = payload
args = tuple(args_map[i] for i in range(max_arg_index + 1))
return Params(*args, **kwargs)
def cpu_converter(cpu: t.Union[int, float, str]) -> float:
if isinstance(cpu, (int, float)):
return float(cpu)
if isinstance(cpu, str):
milli_match = re.match("([0-9]+)m", cpu)
if milli_match:
return int(milli_match[1]) / 1000.0
raise ValueError(f"Invalid CPU resource limit '{cpu}'")
def mem_converter(mem: t.Union[int, str]) -> int:
if isinstance(mem, int):
return mem
if isinstance(mem, str):
unit_match = re.match("([0-9]+)([A-Za-z]{1,2})", mem)
mem_multipliers = {
"k": 1000,
"M": 1000**2,
"G": 1000**3,
"T": 1000**4,
"P": 1000**5,
"E": 1000**6,
"Ki": 1024,
"Mi": 1024**2,
"Gi": 1024**3,
"Ti": 1024**4,
"Pi": 1024**5,
"Ei": 1024**6,
}
if unit_match:
base = int(unit_match[1])
unit = unit_match[2]
if unit in mem_multipliers:
return base * mem_multipliers[unit]
raise ValueError(f"Invalid MEM resource limit '{mem}'")
@lru_cache(maxsize=1)
def query_cgroup_cpu_count() -> float:
# Query active cpu processor count using cgroup v1 API, based on OpenJDK
# implementation for `active_processor_count` using cgroup v1:
# https://github.com/openjdk/jdk/blob/master/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
# For cgroup v2, see:
# https://github.com/openjdk/jdk/blob/master/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
def _read_integer_file(filename: str) -> int:
with open(filename, "r", encoding="utf-8") as f:
return int(f.read().rstrip())
cgroup_root = "/sys/fs/cgroup/"
cfs_quota_us_file = os.path.join(cgroup_root, "cpu", "cpu.cfs_quota_us")
cfs_period_us_file = os.path.join(cgroup_root, "cpu", "cpu.cfs_period_us")
shares_file = os.path.join(cgroup_root, "cpu", "cpu.shares")
quota = shares = period = -1
if os.path.isfile(cfs_quota_us_file):
quota = _read_integer_file(cfs_quota_us_file)
if os.path.isfile(shares_file):
shares = _read_integer_file(shares_file)
if shares == 1024:
shares = -1
if os.path.isfile(cfs_period_us_file):
period = _read_integer_file(cfs_period_us_file)
os_cpu_count = float(os.cpu_count() or 1)
limit_count = math.inf
quota_count = 0.0
share_count = 0.0
if quota > -1 and period > 0:
quota_count = float(quota) / float(period)
if shares > -1:
share_count = float(shares) / float(1024)
if quota_count != 0 and share_count != 0:
limit_count = min(quota_count, share_count)
if quota_count != 0:
limit_count = quota_count
if share_count != 0:
limit_count = share_count
return float(min(limit_count, os_cpu_count))
@SingletonFactory
def _cuda_lib() -> "ctypes.CDLL":
libs = ("libcuda.so", "cuda.dll")
for lib in libs:
try:
return ctypes.CDLL(lib)
except OSError:
continue
raise OSError(f"could not load any of: {' '.join(libs)}")
@SingletonFactory
def _init_var() -> t.Tuple["ctypes.CDLL", "PlcType"]:
# https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE.html
# TODO: add threads_per_core, cores, Compute Capability
err = ctypes.c_char_p()
plc: PlcType = {
"err": err,
"device": ctypes.c_int(),
"num_gpus": ctypes.c_int(),
"context": ctypes.c_void_p(),
"free_mem": ctypes.c_size_t(),
"total_mem": ctypes.c_size_t(),
}
try:
_drv = _cuda_lib.get()
res = _drv.cuInit(0)
if res != CUDA_SUCCESS:
_drv.cuGetErrorString(res, ctypes.byref(err))
logger.error(f"cuInit failed with error code {res}: {str(err.value)}")
return _drv, plc
except OSError as e:
raise BentoMLException(
f"{e}\nMake sure to have CUDA "
f"installed you are intending "
f"to use GPUs with BentoML."
)
def gpu_converter(gpus: t.Optional[t.Union[int, str, t.List[str]]]) -> t.List[str]:
if gpus is not None:
drv, plc = _init_var.get()
res = drv.cuDeviceGetCount(ctypes.byref(plc["num_gpus"]))
if res != CUDA_SUCCESS:
drv.cuGetErrorString(res, ctypes.byref(plc["err"]))
logger.error(
f"cuDeviceGetCount failed with error code {res}: {str(plc['err'].value)}"
)
def _validate_dev(dev_id: t.Union[int, str]) -> bool:
_res = drv.cuDeviceGet(ctypes.byref(plc["device"]), int(dev_id))
if _res != CUDA_SUCCESS:
drv.cuGetErrorString(_res, ctypes.byref(plc["err"]))
logger.warning(
"cuDeviceGet failed "
f"with error code {_res}: {str(plc['err'].value)}"
)
return False
return True
if isinstance(gpus, (int, str)):
if gpus == "all":
return [str(dev) for dev in range(plc["num_gpus"].value)]
else:
if _validate_dev(gpus):
return [str(gpus)]
raise BentoMLException(
f"Unknown GPU devices. Available devices: {plc['num_gpus'].value}"
)
else:
return list(
itertools.chain.from_iterable([gpu_converter(gpu) for gpu in gpus])
)
return list()
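# Assumed behaviour of gpu_converter: "all" expands to every CUDA device id the
# driver reports (as strings), a single int/str is validated against the driver,
# a list is flattened recursively, and None yields an empty list.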
def get_gpu_memory(dev: int) -> t.Tuple[int, int]:
"""Return Total Memory and Free Memory in given GPU device. in MiB"""
drv, plc = _init_var.get()
res = drv.cuDeviceGet(ctypes.byref(plc["device"]), dev)
if res != CUDA_SUCCESS:
drv.cuGetErrorString(res, ctypes.byref(plc["err"]))
logger.error(
"cuDeviceGet failed " f"with error code {res}: {str(plc['err'].value)}"
)
try:
res = drv.cuCtxCreate_v2(ctypes.byref(plc["context"]), 0, plc["device"])
except AttributeError:
res = drv.cuCtxCreate(ctypes.byref(plc["context"]), 0, plc["device"])
if res != CUDA_SUCCESS:
drv.cuGetErrorString(res, ctypes.byref(plc["err"]))
logger.error(
f"cuCtxCreate failed with error code {res}: {str(plc['err'].value)}"
)
try:
res = drv.cuMemGetInfo_v2(
ctypes.byref(plc["free_mem"]), ctypes.byref(plc["total_mem"])
)
except AttributeError:
res = drv.cuMemGetInfo(
ctypes.byref(plc["free_mem"]), ctypes.byref(plc["total_mem"])
)
if res != CUDA_SUCCESS:
drv.cuGetErrorString(res, ctypes.byref(plc["err"]))
logger.error(
f"cuMemGetInfo failed with error code {res}: " f"{str(plc['err'].value)}"
)
_total_mem = plc["total_mem"].value
_free_mem = plc["free_mem"].value
logger.debug(f"Total Memory: {_total_mem} MiB\nFree Memory: {_free_mem} MiB")
drv.cuCtxDetach(plc["context"])
return _total_mem, _free_mem
| 31.692521 | 97 | 0.589022 |
7945395b3ddc4f27e3d7c06365b0dbe369cf5ec9 | 759 | py | Python | pks/templatetags/domainFilters.py | xingyifei2016/clusterCAD | fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6 | ["BSD-3-Clause-LBNL"] | 7 | 2018-11-06T00:04:47.000Z | 2021-08-05T04:37:12.000Z | pks/templatetags/domainFilters.py | xingyifei2016/clusterCAD | fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6 | ["BSD-3-Clause-LBNL"] | 26 | 2017-08-11T21:51:46.000Z | 2022-03-11T23:18:25.000Z | pks/templatetags/domainFilters.py | xingyifei2016/clusterCAD | fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6 | ["BSD-3-Clause-LBNL"] | 7 | 2017-08-16T17:28:40.000Z | 2022-03-02T00:07:00.000Z |
from django import template
from django.utils.http import urlquote
from rdkit import Chem
from pks.models import ACP, cMT, PCP, Module
import re
register = template.Library()
@register.filter
def classname(obj):
return obj.__class__.__name__
@register.filter
def smiles(mol):
if mol:
smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
return urlquote(smiles)
else:
return False
@register.filter
def unquotedSmiles(mol):
return Chem.MolToSmiles(mol, isomericSmiles=True)
@register.filter
def stripTrailingVersion(accession):
return re.sub("\.\d+$", "", accession)
@register.filter
def urlq(str):
return urlquote(str)
@register.filter
def sigfig(inputFloat):
return '%s' % float('%.3g' % inputFloat)
| 21.083333 | 59 | 0.71805 |
794539825f09ce2fa3f7863315f644155730b952 | 216 | py | Python | codebraid/codeprocessors/__init__.py | musm/codebraid | 0fabf430050193027470ed2c0920ab4a57d3bf29 | ["BSD-3-Clause"] | 270 | 2019-02-26T07:46:56.000Z | 2022-03-23T00:34:09.000Z | codebraid/codeprocessors/__init__.py | musm/codebraid | 0fabf430050193027470ed2c0920ab4a57d3bf29 | ["BSD-3-Clause"] | 50 | 2019-02-28T01:49:36.000Z | 2022-03-06T15:00:12.000Z | codebraid/codeprocessors/__init__.py | musm/codebraid | 0fabf430050193027470ed2c0920ab4a57d3bf29 | ["BSD-3-Clause"] | 11 | 2019-05-28T21:08:07.000Z | 2022-02-26T21:10:35.000Z |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from .base import CodeProcessor
| 18 | 45 | 0.694444 |
79453a1b73242bc8d238a891ec92130ba1e993ec | 16,666 | py | Python | My poop analysis.py | PClough/My-2020-year-in-poo | 9c842b2f2688228f071bbfe65288e112c1e3abad | ["MIT"] | 2 | 2021-01-01T07:40:37.000Z | 2021-01-01T13:07:45.000Z | My poop analysis.py | PClough/My-2020-year-in-poo | 9c842b2f2688228f071bbfe65288e112c1e3abad | ["MIT"] | null | null | null | My poop analysis.py | PClough/My-2020-year-in-poo | 9c842b2f2688228f071bbfe65288e112c1e3abad | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Poop analysis
Created 2020
@author: PClough
"""
import pandas as pd
import numpy as np
import chart_studio
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
from scipy import stats
import datetime as dt
from time import strptime
import calendar
df = pd.read_excel("Poo data.xlsx", engine='openpyxl')
chart_studio.tools.set_credentials_file(username='YOUR USERNAME HERE', api_key='YOUR API HERE')
#%% Violin plot for day of week on x axis and type of poo on y axis
fig2 = go.Figure()
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
for day in days:
fig2.add_trace(go.Violin(x = Date_column[Date_column == day],
y = df['Type of poop 💩? '][Date_column == day],
name = day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')))
fig2.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type over whole year", font = dict(size = 16))
fig2.update_yaxes(ticks="inside", tick0 = 1, dtick = 1, title = "Bristol stool scale index")
plot(fig2)
# %% Ridgeline plot for day of week on x axis and type of poo on y axis
# 12 rows of data, one for each month
# 7 columns of data, averaging that months poo types
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
New_Date_column = df['When did the poo occur? '].dt.strftime("%b")
i = 0
max_val = 0
data = np.zeros([12,100]) # the value of 100 is just massively oversizing it, assuming there will be fewer than 100 poos of a single type in one month
for month in months:
for j in range(1,8):
data[i, np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j))] = j-1
if max_val < np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j)):
max_val = np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j))
i += 1
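# Each row of `data` now corresponds to one month: for every stool type j, the
# column whose index equals that month's count of type-j poos is set to j-1.
# These rows feed the horizontal violin traces below.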
# Find where the furthest right hand datapoint is and then cut everything off after that
idx = np.arange(max_val+1, 100)
data = np.delete(data, idx, axis=1)
data[data == 0] = 'nan'
fig3 = go.Figure()
for data_line in data:
fig3.add_trace(go.Violin(x=data_line))
fig3.update_traces(orientation='h', side='positive', width=2, points=False)
fig3.update_layout(xaxis_showgrid=False,
xaxis_zeroline=False,
xaxis=dict(range=[0,8]),
title = "Average poo type over whole year",
font = dict(size = 16))
plot(fig3)
#%% Violin plot for day of week on x axis and type of poo on y axis broken out month by month
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
fig4 = make_subplots(rows=2, cols=6, shared_yaxes=True, subplot_titles=(months))
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
row_num = 1
col_num = 0
for month in months:
col_num += 1
if col_num > 6:
col_num = 1
row_num = 2
for day in days:
fig4.add_trace(go.Violin(x = Date_column[Date_column == day][New_Date_column == month],
y = df['Type of poop 💩? '][Date_column == day][New_Date_column == month],
name = month + day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')),
row = row_num, col = col_num)
fig4.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type, broken down month-by-month", font = dict(size = 16))
fig4.update_yaxes(ticks="inside", col = 1, tick0 = 1, dtick = 1, title = "Bristol stool scale index")
fig4.update_xaxes(ticks="inside")
plot(fig4)
# %% Calendar plot of each day and number of poos, darker colour for more poos
# Number of poos for each day
Num_of_poos = pd.DataFrame()
j = 0
for i in df['When did the poo occur? '].dt.strftime("%x").unique():
Num_of_poos.loc[j, 'Date'] = i
Num_of_poos.loc[j, 'Day'] = pd.to_datetime(i).strftime("%d")
Num_of_poos.loc[j, 'Month'] = pd.to_datetime(i).strftime("%b")
Num_of_poos.loc[j, 'Count'] = (df['When did the poo occur? '].dt.strftime("%x") == i).sum()
j += 1
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
total_poos_in_month = []
plot_titles = []
j = 0
for i in months:
total_poos_in_month.append(int(Num_of_poos['Count'][Num_of_poos['Month'] == i].sum()))
plot_titles.append(i + '<br>Total poopies = ' + str(total_poos_in_month[j]))
j += 1
fig7 = make_subplots(rows = 2, cols = 6, shared_yaxes = True, subplot_titles = plot_titles)
year = 2020
row_num = 1
col_num = 0
for month in months:
col_num += 1
if col_num > 6:
col_num = 1
row_num = 2
MyMonthData = calendar.monthcalendar(2020, strptime(month, '%b').tm_mon)
z = MyMonthData[::-1]
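    # MyMonthData lays the month out week by week (zeros pad days outside the
    # month); it is reversed here, and in the loop below the padding becomes ''
    # while real days are replaced with that day's poo count.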
m = 0
for i in z:
n = 0
for j in i:
if j == 0:
z[m].pop(n)
z[m].insert(n, '')
elif any((Num_of_poos['Day'] == str(j).zfill(2)) & (Num_of_poos['Month'] == month)) == False:
z[m].pop(n)
z[m].insert(n, 0)
else:
z[m].pop(n)
z[m].insert(n, int(Num_of_poos.loc[(Num_of_poos['Day'] == str(j).zfill(2)) & (Num_of_poos['Month'] == month), 'Count']))
n += 1
m += 1
name = []
for a in calendar.Calendar().monthdatescalendar(year, strptime(month, '%b').tm_mon):
for b in a:
name.append(b.strftime("%d %b %Y"))
name = np.reshape([inner for inner in name], (len(MyMonthData), 7))
name = name[::-1]
fig7.add_trace(go.Heatmap(
x = days,
y = list(range(len(MyMonthData), 0)),
z = z,
meta = name,
hovertemplate = 'Date: %{meta} <br>Number of poos: %{z}<extra></extra>',
xgap = 1, ygap = 1,
zmin = 0, zmax = max(Num_of_poos['Count']),
# colorscale = "turbid"),
colorscale = [
[0, 'rgb(249, 238, 229)'], # 0 for the prettiness
[0.14, 'rgb(249, 230, 217)'], # 0
[0.29, 'rgb(204, 153, 102)'], # 1
[0.43, 'rgb(153, 102, 51)'], # 2
[0.57, 'rgb(115, 77, 38)'], # 3
[0.71, 'rgb(77, 51, 25)'], # 4
[1, 'rgb(38, 26, 13)']]), # 5
row = row_num, col = col_num)
fig7['layout'].update(plot_bgcolor = 'white',
title_text = "Poopy calendar",
yaxis_showticklabels = False,
yaxis7_showticklabels = False,
font = dict(size = 16))
plot(fig7)
# %% Distribution of poos on stool scale per day
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
Total_poos = len(df['Type of poop 💩? '])
ydata = []
for day in days:
ydata.append((len(df['Type of poop 💩? '][Date_column == day])/Total_poos)*100)
fig9 = go.Figure()
fig9.add_trace(go.Bar(x = days,
y = ydata,
hovertemplate = '%{y:.1f}%<extra></extra>',
name = day,
showlegend = False,
marker_color = ('rgb(166,86,50)')))
fig9.update_layout(title = "Poo distribution by day", font = dict(size = 16))
fig9.update_yaxes(range=[0, 20], ticks = "inside", title = "Percentage of poos / %")
fig9.update_xaxes(title = "Day of week")
plot(fig9)
# Should make this a stacked bar chart: type of poo stacked, with the total number of poos as the overall height.
#%% Most frequent time of day
timerange = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23']
X_titles = [t + ':00' for t in timerange]
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Time_column = df['When did the poo occur? '].dt.strftime("%H")
Total_poos = len(df['Type of poop 💩? '])
ydata = []
for t in timerange:
ydata.append((len(df['Type of poop 💩? '][Time_column == t])/Total_poos)*100)
fig10 = go.Figure()
fig10.add_trace(go.Bar(x = timerange,
y = ydata,
hovertemplate = '%{y:.1f}%<extra></extra>',
showlegend = False,
marker_color = ('rgb(166,86,50)')))
fig10.update_layout(title = "Poo distribution by time", font = dict(size = 16))
fig10.update_yaxes(range=[0, 20], ticks = "inside", title = "Percentage of poos / %")
fig10.update_xaxes(ticks = "inside", title = "Time of day", tickmode = 'array', tickvals = [int(t) for t in timerange], ticktext = X_titles)
plot(fig10)
# %% Distribution by type
Type_of_poop = [str(i) for i in range(1,8)] # 1 to 7
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Total_poos = len(df['Type of poop 💩? '])
ydata = []
for poo in Type_of_poop:
ydata.append((sum(df['Type of poop 💩? '] == poo)/Total_poos)*100)
fig11 = go.Figure()
fig11.add_trace(go.Bar(x = Type_of_poop,
y = ydata,
hovertemplate = '%{y:.1f}%<extra></extra>',
showlegend = False,
marker_color = ('rgb(166,86,50)')))
fig11.update_layout(title = "Poo distribution by type", font = dict(size = 16))
fig11.update_yaxes(range=[0, 60], ticks = "inside", title = "Percentage of poos / %")
fig11.update_xaxes(title = "Type of poo")
plot(fig11)
# %% Distribution by type excluding Jan and Feb
Type_of_poop = [str(i) for i in range(1,8)] # 1 to 7
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Total_poos = len(df['Type of poop 💩? '])
ydata = []
for poo in Type_of_poop:
ydata.append(sum(np.logical_and(df['Type of poop 💩? '] == poo, df['When did the poo occur? '].dt.strftime("%m") > '02')/Total_poos)*100)
fig12 = go.Figure()
fig12.add_trace(go.Bar(x = Type_of_poop,
y = ydata,
hovertemplate = '%{y:.1f}%<extra></extra>',
showlegend = False,
marker_color = ('rgb(166,86,50)')))
fig12.update_layout(title = "Poo distribution by type (excluding Jan and Feb)", font = dict(size = 16))
fig12.update_yaxes(range=[0, 60], ticks = "inside", title = "Percentage of poos / %")
fig12.update_xaxes(title = "Type of poo")
plot(fig12)
#%% Poo stats
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
# Number of poos for each day
Num_type_of_poos = pd.DataFrame()
j = 0
for i in df['When did the poo occur? '].dt.strftime("%x").unique():
Num_type_of_poos.loc[j, 'Date'] = i
Num_type_of_poos.loc[j, 'Day'] = pd.to_datetime(i).strftime("%d")
Num_type_of_poos.loc[j, 'Month'] = pd.to_datetime(i).strftime("%b")
Num_type_of_poos.loc[j, 'Count'] = (df['When did the poo occur? '].dt.strftime("%x") == i).sum()
Num_type_of_poos.loc[j, 'Type'] = np.abs(int(df['Type of poop 💩? '][j]) - 4)
j += 1
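# Note: the 'Type' column stores the absolute distance from type 4 on the
# Bristol stool scale (np.abs(type - 4)).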
# Max number of poos in a day, week, month
Max_poopys = np.max(Num_type_of_poos['Count'])
print('Max poos in a day =', Max_poopys)
# Total poos in a year
Total_annual_poos = np.size(Num_type_of_poos, 0)
print('Total poos in a year =', Total_annual_poos)
# Total days without poos
# Create a list of dates in each year
# Remove dates based on if the year is not 2020 and then remove duplicate dates (check order duplicates though)
flat_list = []
for sublist in calendar.Calendar().yeardatescalendar(2020):
for item3 in sublist:
for item2 in item3:
for item in item2:
if item.strftime("%Y") != '2020':
continue
else:
flat_list.append(item)
# Remove duplicates
flat_list = list(dict.fromkeys(flat_list))
# Produce list of dates of poos
new_date_list = []
for i in Num_type_of_poos['Date']:
new_date_list.append(dt.datetime.strptime(i, '%m/%d/%y').date())
Total_no_poo_days = 0
for i in flat_list:
if i not in new_date_list:
Total_no_poo_days += 1
print('Total number of days without a poo =', Total_no_poo_days)
# Total days with 3 or more poos
# Average poo's per day, week, month
# Longest poo streak
Longest_poo_streak = 0
poo_streak = 0
for i in flat_list:
if i in new_date_list:
poo_streak += 1
else:
poo_streak = 0
# print(poo_streak)
if poo_streak > Longest_poo_streak:
date_of_end = i
# date_of_start = i
Longest_poo_streak = poo_streak
print('Longest poo streak =', Longest_poo_streak, ' ended =', dt.datetime.strftime(date_of_end, "%d %B %Y"))
# Longest time between poos
Longest_time_between_poos = dt.timedelta(0)
poo_time = dt.timedelta(0)
prev_time = df['When did the poo occur? '][0]
for i in df['When did the poo occur? '][1::]:
poo_time = i - prev_time
prev_time = i
if poo_time > Longest_time_between_poos:
date_of_end = i
Longest_time_between_poos = poo_time
print('Longest time between poos =', Longest_time_between_poos, ' ended =', dt.datetime.strftime(date_of_end, "%d %B %Y %H:%M:%S"))
# Shortest time between poos
Shortest_time_between_poos = dt.timedelta(0)
poo_time = dt.timedelta(0)
prev_time = df['When did the poo occur? '][0]
for i in df['When did the poo occur? '][1::]:
poo_time = i - prev_time
prev_time = i
if poo_time < Shortest_time_between_poos:
date_of_end = i
Shortest_time_between_poos = poo_time
if Shortest_time_between_poos.days < 0:
Shortest_time_between_poos = dt.timedelta(days=0, seconds=Shortest_time_between_poos.seconds, microseconds=Shortest_time_between_poos.microseconds)
print('Shortest time between poos =', Shortest_time_between_poos, ' ended =', dt.datetime.strftime(date_of_end, "%d %B %Y %H:%M:%S"))
# Average and median time between poos
poo_time = []
prev_time = df['When did the poo occur? '][0]
for i in df['When did the poo occur? '][1::]:
poo_time.append(i - prev_time)
prev_time = i
Average_time_between_poos = np.mean(poo_time)
print('Average time between poos =', Average_time_between_poos)
Median_time_between_poos = np.median(poo_time)
print('Median time between poos =', Median_time_between_poos)
Mode_time_between_poos = stats.mode(poo_time)
print('Mode time between poos =', Mode_time_between_poos)
#%% Plot distribution of poos
# x = time between poos in 1 hour time ranges
# y = frequency of poos in time ranges
x_data = range(0, int(max(poo_time).seconds/3600 + max(poo_time).days*24))
# convert the list of timedeltas to hours
pt = []
for j in poo_time:
pt.append(j.seconds/3600 + j.days*24)
# count how many fall within the hourly time brackets
prev_i = x_data[0]
y_data = []
count = 0
for i in x_data[1::]:
for j in pt:
if j < i and j > prev_i:
count += 1
y_data.append(count)
count = 0
prev_i = i
fig13 = go.Figure()
fig13.add_trace(go.Bar(x = list(x_data),
y = y_data,
hovertemplate = '%{y:.1f}%<extra></extra>',
showlegend = False,
marker_color = ('rgb(166,86,50)')))
fig13.update_layout(title = "Poo distribution by time since last poo", font = dict(size = 16))
fig13.update_yaxes(range=[0, 40], ticks = "inside", title = "Percentage of poos / %")
fig13.update_xaxes(title = "Time since last poo (hours)")
plot(fig13)
| 34.151639 | 160 | 0.593784 |
79453a32521440527075b87e88247a34a306036b | 548 | py | Python | stadiumpy/__init__.py | apexcodings/stadiumpyGUI | 436723dd40847b5ca1c54327ac083129f31e75da | ["MIT"] | 3 | 2020-12-30T07:40:47.000Z | 2022-03-24T18:48:57.000Z | stadiumpy/__init__.py | apexcodings/stadiumpyGUI | 436723dd40847b5ca1c54327ac083129f31e75da | ["MIT"] | null | null | null | stadiumpy/__init__.py | apexcodings/stadiumpyGUI | 436723dd40847b5ca1c54327ac083129f31e75da | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Initialize the library.
:copyright: 2020 STADIUMPy
:license: MIT
"""
# from stadiumpy.stadiumpy.startpage import StartPage
# from stadiumpy.prf_page import prfview
# from stadiumpy.srf_page import srfview
# from stadiumpy.dataenquirypage import dataenquiry
# from stadiumpy.sks_page import sksview
# from stadiumpy.page_control import PageControl
# from stadiumpy.plot_map_gui import plotMap
print("Hello from ", __name__)
__version__ = "unknown"
try:
from ._version import __version__
except ImportError:
pass | 26.095238 | 53 | 0.781022 |
79453b48f2ff15912a20f3ac7d46160d32c04dab | 165 | py | Python | src/my_pkg/main_character.py | satamame/my_pkg | d22aaa3b0a101b37654189dded3e96d10871dd29 | [
"MIT"
] | null | null | null | src/my_pkg/main_character.py | satamame/my_pkg | d22aaa3b0a101b37654189dded3e96d10871dd29 | [
"MIT"
] | null | null | null | src/my_pkg/main_character.py | satamame/my_pkg | d22aaa3b0a101b37654189dded3e96d10871dd29 | [
"MIT"
] | null | null | null | from .character import Character
class MainCharacter(Character):
def __init__(self, name='ヒロ', age=14):
self.name = name
super().__init__(age)
| 20.625 | 42 | 0.660606 |
79453c8934438baba6e79fbdbe6563865bc3264d | 10,142 | py | Python | lib/fast_rcnn/config.py | candacelax/bottom-up-attention | dea4e48d71aa7d9abba5a3b4a338e3d688a76a79 | [
"MIT"
] | null | null | null | lib/fast_rcnn/config.py | candacelax/bottom-up-attention | dea4e48d71aa7d9abba5a3b4a338e3d688a76a79 | [
"MIT"
] | null | null | null | lib/fast_rcnn/config.py | candacelax/bottom-up-attention | dea4e48d71aa7d9abba5a3b4a338e3d688a76a79 | [
"MIT"
] | 1 | 2020-03-20T10:27:02.000Z | 2020-03-20T10:27:02.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.RPN_NORMALIZE_TARGETS = False
__C.TRAIN.RPN_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.RPN_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfies both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# whether use class aware box or not
__C.TRAIN.AGNOSTIC = False
# Detect attributes of objects
__C.TRAIN.HAS_ATTRIBUTES = False
# Detect relations between objects
__C.TRAIN.HAS_RELATIONS = False
# Fraction of relation minibatch that is labeled with a relation (i.e. class > 0)
__C.TRAIN.MIN_RELATION_FRACTION = 0.25
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Flag for soft-NMS method. 0 performs standard NMS, 1 performs soft-NMS with linear weighting and
# 2 performs soft-NMS with Gaussian weighting
__C.TEST.SOFT_NMS = 0
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# whether use class aware box or not
__C.TEST.AGNOSTIC = False
# Detect attributes of objects
__C.TEST.HAS_ATTRIBUTES = False
# Detect relations between objects
__C.TEST.HAS_RELATIONS = False
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
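# Illustrative example of the effect (box values are made up): with DEDUP_BOXES = 1/16,
# the rois [0, 4, 2, 12, 14] and [0, 5, 3, 13, 15] round to the same feature-map
# coordinates after scaling, so only one of them is kept:
#   np.round(np.array([[0, 4, 2, 12, 14], [0, 5, 3, 13, 15]]) * __C.DEDUP_BOXES)  # both rows -> [0, 0, 0, 1, 1]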
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net=None, attributes=False):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is not None:
outdir = osp.join(outdir, net.name)
if attributes:
outdir = osp.join(outdir, "attr")
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
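# For example (names are illustrative, not taken from this repo's configs): with
# EXP_DIR = 'default', an imdb named 'voc_2007_trainval' and a net named 'VGG16',
# get_output_dir resolves to <ROOT_DIR>/output/default/voc_2007_trainval/VGG16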
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
            assert subkey in d
d = d[subkey]
subkey = key_list[-1]
        assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
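# A minimal, illustrative self-test of the override helpers above; the values
# are arbitrary and only exercise cfg_from_list when this module is run directly.
if __name__ == '__main__':
    cfg_from_list(['TRAIN.BATCH_SIZE', '64', 'TEST.NMS', '0.4'])
    assert __C.TRAIN.BATCH_SIZE == 64
    assert __C.TEST.NMS == 0.4
    print('cfg_from_list override applied:', __C.TRAIN.BATCH_SIZE, __C.TEST.NMS)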
| 32.402556 | 99 | 0.696312 |
79453def3eb8d6a2b896a00dc7f7c2511a321d06 | 3,871 | py | Python | FASTQ_Preprocess.py | pkMyt1/ScarMapper | 39ebc5b15a6224e8002cb5748eeb60397f8a4b06 | [
"MIT"
] | 4 | 2020-10-28T15:36:45.000Z | 2022-01-04T14:47:08.000Z | FASTQ_Preprocess.py | pkMyt1/ScarMapper | 39ebc5b15a6224e8002cb5748eeb60397f8a4b06 | [
"MIT"
] | 11 | 2020-03-23T12:43:14.000Z | 2020-10-14T19:14:59.000Z | FASTQ_Preprocess.py | pkMyt1/ScarMapper | 39ebc5b15a6224e8002cb5748eeb60397f8a4b06 | [
"MIT"
] | null | null | null | """
@author: Dennis A. Simpson
University of North Carolina at Chapel Hill
Chapel Hill, NC 27599
@copyright: 2019
"""
import datetime
import os
import collections
import subprocess
import argparse
import sys
import time
from distutils.util import strtobool
from scipy.stats import gmean
from scarmapper import TargetMapper as Target_Mapper, INDEL_Processing as Indel_Processing
from Valkyries import Tool_Box, Version_Dependencies as VersionDependencies, FASTQ_Tools
__author__ = 'Dennis A. Simpson'
__version__ = '0.1.0'
def main(command_line_args=None):
VersionDependencies.python_check()
if not command_line_args:
command_line_args = sys.argv
run_start = datetime.datetime.today().strftime("%a %b %d %H:%M:%S %Y")
parser = argparse.ArgumentParser(description="A little ditty to manipulate FASTQ files.\n {0} v{1}"
.format(__package__, __version__), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--options_file', action='store', dest='options_file', required=True,
help='File containing program parameters.')
options_parser = Tool_Box.options_file(parser)
args = options_parser.parse_args()
# args, options_parser = string_to_boolean(args, options_parser)
options_parser.set_defaults(Trim5=0)
options_parser.set_defaults(Trim3=0)
options_parser.set_defaults(Minimum_Length=100)
options_parser.set_defaults(N_Limit=100)
options_parser.set_defaults(HaloPLEX=False)
options_parser.set_defaults(ThruPLEX=False)
options_parser.set_defaults(FASTQ_PreProcess=True)
args = options_parser.parse_args()
# Check options file for errors.
error_checking(args)
log = Tool_Box.Logger(args)
Tool_Box.log_environment_info(log, args, command_line_args)
start_time = time.time()
module_name = ""
# Initialize generator to read each FASTQ file
fastq1 = FASTQ_Tools.FASTQ_Reader(args.FASTQ1, log)
fastq2 = FASTQ_Tools.FASTQ_Reader(args.FASTQ2, log)
index1 = FASTQ_Tools.FASTQ_Reader(args.Index1, log)
index2 = FASTQ_Tools.FASTQ_Reader(args.Index2, log)
splitter_data = FASTQ_Tools.FastqSplitter(args, log, fastq1, fastq2, index1, index2, paired_end=True)
new_fastq1, new_fastq2 = splitter_data.file_writer()
warning = "\033[1;31m **See warnings above**\033[m" if log.warning_occurred else ''
elapsed_time = int(time.time() - start_time)
log.info("****FASTQ Preprocessing {0} complete ({1} seconds, {2} Mb peak memory).****"
.format(module_name, elapsed_time, Tool_Box.peak_memory(), warning))
def error_checking(args):
"""
Check parameter file for errors.
:param args:
"""
if not os.path.exists(args.Working_Folder):
print("\033[1;31mERROR:\n\tWorking Folder Path: {} Not Found. Check Options File."
.format(args.Working_Folder))
raise SystemExit(1)
if getattr(args, "FASTQ1", False) and not os.path.isfile(args.FASTQ1):
print("\033[1;31mERROR:\n\t--FASTQ1: {} Not Found. Check Options File."
.format(args.FASTQ1))
raise SystemExit(1)
if getattr(args, "FASTQ2", False) and not os.path.isfile(args.FASTQ2):
print("\033[1;31mERROR:\n\t--FASTQ2: {} Not Found. Check Options File."
.format(args.FASTQ2))
raise SystemExit(1)
if getattr(args, "Index1", False) and not os.path.isfile(args.Index1):
print("\033[1;31mERROR:\n\t--Index1: {} Not Found. Check Options File."
.format(args.Index1))
raise SystemExit(1)
if getattr(args, "Index2", False) and not os.path.isfile(args.Index2):
print("\033[1;31mERROR:\n\t--Index2: {} Not Found. Check Options File."
.format(args.Index1))
raise SystemExit(1)
if __name__ == '__main__':
main() | 37.221154 | 118 | 0.690519 |
79453fa87f0a0447989673bec807b4ef3bb1ce28 | 381 | py | Python | PyMOTW/source/os.path/ospath_join.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | PyMOTW/source/os.path/ospath_join.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:52:03.000Z | 2020-07-18T04:18:01.000Z | PyMOTW/source/os.path/ospath_join.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 2 | 2021-03-06T04:28:32.000Z | 2021-03-06T04:59:17.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Combine path components to create a single path.
"""
#end_pymotw_header
import os.path
PATHS = [
('one', 'two', 'three'),
('/', 'one', 'two', 'three'),
('/one', '/two', '/three'),
]
for parts in PATHS:
print('{} : {!r}'.format(parts, os.path.join(*parts)))
| 18.142857 | 58 | 0.590551 |
794540df37732c1dae7f6a73410031d2dc9b7495 | 2,234 | py | Python | src/opserver/plugins/alarm_base.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | null | null | null | src/opserver/plugins/alarm_base.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | null | null | null | src/opserver/plugins/alarm_base.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | 18 | 2017-01-12T09:28:44.000Z | 2019-04-18T20:47:42.000Z | import abc
from collections import namedtuple
class AlarmBase(object):
"""Base class for Alarms
"""
SYS_EMERG, SYS_ALERT, SYS_CRIT, SYS_ERR,\
SYS_WARN, SYS_NOTICE, SYS_INFO, SYS_DEBUG = range(8)
_RULES = None
def __init__(self, sev, at=0, it=0, fec=False, fcs=0, fct=0):
self._config = None
self._sev = sev
self._ActiveTimer = at
self._IdleTimer = it
self._FreqExceededCheck = fec
self._FreqCheck_Times = fct
self._FreqCheck_Seconds = fcs
def rules(self):
"""Return the rules for this alarm
"""
return self._RULES
def config(self):
"""Return the config object for this alarm
"""
return self._config
def severity(self):
"""Return the severity of the alarm
This should not depend on UVE contents
"""
return self._sev
def FreqCheck_Times(self):
"""Return the number of times an alarm should be enabled
for FreqExceededCheck
"""
return self._FreqCheck_Times
def FreqCheck_Seconds(self):
"""Return the number of seconds in which FreqExceededCheck
should be checked
"""
return self._FreqCheck_Seconds
def FreqExceededCheck(self):
"""Return whether FreqExceededCheck is enabled
"""
return self._FreqExceededCheck
def IdleTimer(self):
"""Return the soak time value for clearing the alarm
This should be 0 if there is no need of soak time
"""
return self._IdleTimer
def ActiveTimer(self):
"""Return the soak time value for setting the alarm
This should be 0 if there is no need of soak time
"""
return self._ActiveTimer
def set_config(self, alarm_cfg_obj):
"""Set the alarm config object for this alarm
"""
self._config = alarm_cfg_obj
#def __call__(self, uve_key, uve_data):
"""Evaluate whether alarm should be raised.
Implement this method if you want to override the generic
alarm processing engine.
:param uve_key: Key of the UVE (a string)
:param uve_data: UVE Contents
:returns: list of AlarmRuleMatch
"""
| 27.925 | 66 | 0.623993 |
794541b5fa4f16ec1c5f5a043a4ea43f18a71543 | 1,780 | py | Python | data_structures/stack/stack_using_linked_list.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 1,723 | 2019-07-30T07:06:22.000Z | 2022-03-31T15:22:22.000Z | data_structures/stack/stack_using_linked_list.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 213 | 2019-10-06T08:07:47.000Z | 2021-10-04T15:38:36.000Z | data_structures/stack/stack_using_linked_list.py | FatiahBalo/python-ds | 9eb88425822b6da4d7bd673a124c13fbe6f17523 | [
"MIT"
] | 628 | 2019-10-06T10:26:25.000Z | 2022-03-31T01:41:00.000Z | """ Use LinkedList class to implement a Stack. """
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def insert_first(self, new_element):
"Insert new element as the head of the LinkedList"
# fetch the current head
current = self.head
new_element.next = current
self.head = new_element
def delete_first(self):
"Delete the first (head) element in the LinkedList and return it"
current = self.head
if current:
if current.next:
self.head = current.next
else:
self.head = None
return current
else:
return None
class Stack(object):
def __init__(self,top=None):
self.ll = LinkedList(top)
def push(self, new_element):
"Push (add) a new element onto the top of the stack"
self.ll.insert_first(new_element)
def pop(self):
"Pop (remove) the first element off the top of the stack and return it"
return self.ll.delete_first()
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a Stack
stack = Stack(e1)
# Test stack functionality
stack.push(e2)
stack.push(e3)
print(stack.pop().value)
print(stack.pop().value)
print(stack.pop().value)
print(stack.pop())
stack.push(e4)
print(stack.pop().value)
| 25.070423 | 79 | 0.600562 |
794542246562967703384b9fe6a6da547edd2814 | 1,678 | py | Python | test/test_send_report_email.py | Danilka/APIv3-python-library | c96472f47d652d2e09e8b4a48a80e33fde06e7f1 | [
"MIT"
] | null | null | null | test/test_send_report_email.py | Danilka/APIv3-python-library | c96472f47d652d2e09e8b4a48a80e33fde06e7f1 | [
"MIT"
] | null | null | null | test/test_send_report_email.py | Danilka/APIv3-python-library | c96472f47d652d2e09e8b4a48a80e33fde06e7f1 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sib_api_v3_sdk
from sib_api_v3_sdk.models.send_report_email import SendReportEmail # noqa: E501
from sib_api_v3_sdk.rest import ApiException
class TestSendReportEmail(unittest.TestCase):
"""SendReportEmail unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSendReportEmail(self):
"""Test SendReportEmail"""
# FIXME: construct object with mandatory attributes with example values
# model = sib_api_v3_sdk.models.send_report_email.SendReportEmail() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 40.926829 | 820 | 0.675209 |
794542827e17c56ddafcf8d99aaabfde15db912b | 1,715 | py | Python | reachy_pyluos_hal/tools/reachy_dynamixel_config.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | null | null | null | reachy_pyluos_hal/tools/reachy_dynamixel_config.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | 2 | 2021-04-07T15:34:45.000Z | 2022-01-26T09:03:44.000Z | reachy_pyluos_hal/tools/reachy_dynamixel_config.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | null | null | null | """Command line utility tool to configure Dynamixel motor by using the Reachy configuration file."""
from subprocess import call
from ..config import load_config
from ..dynamixel import DynamixelMotor, DynamixelError
def main():
"""Run main entry point."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('port')
parser.add_argument('config_file')
parser.add_argument('joint_name')
parser.add_argument('--return-delay-time', type=int, default=20)
parser.add_argument('--temperature-limit', type=int, default=55)
parser.add_argument('--alarm-shutdown', choices=[err.name for err in DynamixelError], nargs='+', default=[DynamixelError.OverheatingError])
args = parser.parse_args()
config = load_config(args.config_file)
dynamixel_motors = {}
for part in config:
dynamixel_motors.update({
name: device
for name, device in part.items()
if isinstance(device, DynamixelMotor)
})
if args.joint_name not in dynamixel_motors:
raise ValueError(f'Error: The joint name should be one of {dynamixel_motors.keys()}!')
dxl = dynamixel_motors[args.joint_name]
cmd = [
'dynamixel-config',
args.port,
'--id', str(dxl.id),
'--return-delay-time', str(args.return_delay_time),
'--cw-angle-limit', str(dxl.cw_angle_limit),
'--ccw-angle-limit', str(dxl.ccw_angle_limit),
'--temperature-limit', str(args.temperature_limit),
'--alarm-shutdown', ' '.join([error.name for error in args.alarm_shutdown]),
]
print(f'Will now run cmd \"{" ".join(cmd)}\"')
print(call(cmd))
if __name__ == '__main__':
main()
| 32.980769 | 143 | 0.660641 |
794542ad2ed6fef126c06e82eee150b72c58102c | 9,277 | py | Python | fastai/tabular/data.py | claudiosv/fastai | e36fc3dc9f6f7a9b9ae5134486bfb2b9388e7a98 | [
"Apache-2.0"
] | 3 | 2019-07-26T11:25:59.000Z | 2021-12-20T04:13:18.000Z | fastai/tabular/data.py | claudiosv/fastai | e36fc3dc9f6f7a9b9ae5134486bfb2b9388e7a98 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:55:12.000Z | 2022-02-26T11:19:08.000Z | fastai/tabular/data.py | claudiosv/fastai | e36fc3dc9f6f7a9b9ae5134486bfb2b9388e7a98 | [
"Apache-2.0"
] | 1 | 2020-04-04T10:22:30.000Z | 2020-04-04T10:22:30.000Z | "Data loading pipeline for structured data support. Loads from pandas DataFrame"
from ..torch_core import *
from .transform import *
from ..basic_data import *
from ..data_block import *
from ..basic_train import *
from .models import *
from pandas.api.types import is_numeric_dtype, is_categorical_dtype
__all__ = ['TabularDataBunch', 'TabularLine', 'TabularList', 'TabularProcessor', 'tabular_learner']
OptTabTfms = Optional[Collection[TabularProc]]
#def emb_sz_rule(n_cat:int)->int: return min(50, (n_cat//2)+1)
def emb_sz_rule(n_cat:int)->int: return min(600, round(1.6 * n_cat**0.56))
def def_emb_sz(classes, n, sz_dict=None):
"Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
sz_dict = ifnone(sz_dict, {})
n_cat = len(classes[n])
sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb
return n_cat,sz
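# For instance (purely illustrative): a categorical column with 3 levels gets an
# embedding of size min(600, round(1.6 * 3**0.56)) = 3, while one with 50,000
# levels would ask for ~685 and is therefore capped at 600 by the rule above.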
class TabularLine(ItemBase):
"Basic item for tabular data."
def __init__(self, cats, conts, classes, names):
self.cats,self.conts,self.classes,self.names = cats,conts,classes,names
self.data = [tensor(cats), tensor(conts)]
def __str__(self):
res = ''
for c, n in zip(self.cats, self.names[:len(self.cats)]):
res += f"{n} {(self.classes[n][c])}; "
for c,n in zip(self.conts, self.names[len(self.cats):]):
res += f'{n} {c:.4f}; '
return res
class TabularProcessor(PreProcessor):
"Regroup the `procs` in one `PreProcessor`."
def __init__(self, ds:ItemBase=None, procs=None):
procs = ifnone(procs, ds.procs if ds is not None else None)
self.procs = listify(procs)
def process_one(self, item):
df = pd.DataFrame([item,item])
for proc in self.procs: proc(df, test=True)
if len(self.cat_names) != 0:
codes = np.stack([c.cat.codes.values for n,c in df[self.cat_names].items()], 1).astype(np.int64) + 1
else: codes = [[]]
if len(self.cont_names) != 0:
conts = np.stack([c.astype('float32').values for n,c in df[self.cont_names].items()], 1)
else: conts = [[]]
classes = None
col_names = list(df[self.cat_names].columns.values) + list(df[self.cont_names].columns.values)
return TabularLine(codes[0], conts[0], classes, col_names)
def process(self, ds):
if ds.inner_df is None:
ds.classes,ds.cat_names,ds.cont_names = self.classes,self.cat_names,self.cont_names
ds.preprocessed = True
return
for i,proc in enumerate(self.procs):
if isinstance(proc, TabularProc): proc(ds.inner_df, test=True)
else:
#cat and cont names may have been changed by transform (like Fill_NA)
proc = proc(ds.cat_names, ds.cont_names)
proc(ds.inner_df)
ds.cat_names,ds.cont_names = proc.cat_names,proc.cont_names
self.procs[i] = proc
self.cat_names,self.cont_names = ds.cat_names,ds.cont_names
if len(ds.cat_names) != 0:
ds.codes = np.stack([c.cat.codes.values for n,c in ds.inner_df[ds.cat_names].items()], 1).astype(np.int64) + 1
self.classes = ds.classes = OrderedDict({n:np.concatenate([['#na#'],c.cat.categories.values])
for n,c in ds.inner_df[ds.cat_names].items()})
cat_cols = list(ds.inner_df[ds.cat_names].columns.values)
else: ds.codes,ds.classes,self.classes,cat_cols = None,None,None,[]
if len(ds.cont_names) != 0:
ds.conts = np.stack([c.astype('float32').values for n,c in ds.inner_df[ds.cont_names].items()], 1)
cont_cols = list(ds.inner_df[ds.cont_names].columns.values)
else: ds.conts,cont_cols = None,[]
ds.col_names = cat_cols + cont_cols
ds.preprocessed = True
class TabularDataBunch(DataBunch):
"Create a `DataBunch` suitable for tabular data."
@classmethod
def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None,
cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None,
test_df=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->DataBunch:
"Create a `DataBunch` from `df` and `valid_idx` with `dep_var`. `kwargs` are passed to `DataBunch.create`."
cat_names = ifnone(cat_names, []).copy()
cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var}))
procs = listify(procs)
src = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx))
src = src.label_from_df(cols=dep_var) if classes is None else src.label_from_df(cols=dep_var, classes=classes)
if test_df is not None: src.add_test(TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names,
processor = src.train.x.processor))
return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device,
collate_fn=collate_fn, no_check=no_check)
class TabularList(ItemList):
"Basic `ItemList` for tabular data."
_item_cls=TabularLine
_processor=TabularProcessor
_bunch=TabularDataBunch
def __init__(self, items:Iterator, cat_names:OptStrList=None, cont_names:OptStrList=None,
procs=None, **kwargs)->'TabularList':
super().__init__(range_of(items), **kwargs)
#dataframe is in inner_df, items is just a range of index
if cat_names is None: cat_names = []
if cont_names is None: cont_names = []
self.cat_names,self.cont_names,self.procs = cat_names,cont_names,procs
self.copy_new += ['cat_names', 'cont_names', 'procs']
self.preprocessed = False
@classmethod
def from_df(cls, df:DataFrame, cat_names:OptStrList=None, cont_names:OptStrList=None, procs=None, **kwargs)->'ItemList':
"Get the list of inputs in the `col` of `path/csv_name`."
return cls(items=range(len(df)), cat_names=cat_names, cont_names=cont_names, procs=procs, inner_df=df.copy(), **kwargs)
def get(self, o):
if not self.preprocessed: return self.inner_df.iloc[o] if hasattr(self, 'inner_df') else self.items[o]
codes = [] if self.codes is None else self.codes[o]
conts = [] if self.conts is None else self.conts[o]
return self._item_cls(codes, conts, self.classes, self.col_names)
def get_emb_szs(self, sz_dict=None):
"Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`."
return [def_emb_sz(self.classes, n, sz_dict) for n in self.cat_names]
def reconstruct(self, t:Tensor):
return self._item_cls(t[0], t[1], self.classes, self.col_names)
def show_xys(self, xs, ys)->None:
"Show the `xs` (inputs) and `ys` (targets)."
from IPython.display import display, HTML
items,names = [], xs[0].names + ['target']
for i, (x,y) in enumerate(zip(xs,ys)):
res = []
cats = x.cats if len(x.cats.size()) > 0 else []
conts = x.conts if len(x.conts.size()) > 0 else []
for c, n in zip(cats, x.names[:len(cats)]):
res.append(x.classes[n][c])
res += [f'{c:.4f}' for c in conts] + [y]
items.append(res)
items = np.array(items)
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
display(HTML(df.to_html(index=False)))
def show_xyzs(self, xs, ys, zs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions)."
from IPython.display import display, HTML
items,names = [], xs[0].names + ['target', 'prediction']
for i, (x,y,z) in enumerate(zip(xs,ys,zs)):
res = []
cats = x.cats if len(x.cats.size()) > 0 else []
conts = x.conts if len(x.conts.size()) > 0 else []
for c, n in zip(cats, x.names[:len(cats)]):
res.append(str(x.classes[n][c]))
res += [f'{c:.4f}' for c in conts] + [y, z]
items.append(res)
items = np.array(items)
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
display(HTML(df.to_html(index=False)))
def tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None,
ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs):
"Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params."
emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))
model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop,
y_range=y_range, use_bn=use_bn)
return Learner(data, model, metrics=metrics, **learn_kwargs)
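# A minimal usage sketch (dataset, column names and layer sizes are illustrative only):
#
#   path = untar_data(URLs.ADULT_SAMPLE)
#   df = pd.read_csv(path/'adult.csv')
#   procs = [FillMissing, Categorify, Normalize]
#   data = (TabularList.from_df(df, path=path, cat_names=['workclass', 'education'],
#                               cont_names=['age', 'fnlwgt'], procs=procs)
#                      .split_by_idx(list(range(800, 1000)))
#                      .label_from_df(cols='salary')
#                      .databunch())
#   learn = tabular_learner(data, layers=[200, 100], metrics=accuracy)
#   learn.fit_one_cycle(1)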
| 52.117978 | 134 | 0.630376 |
794542af83b191e08a447b1b6487c48d5c5531ce | 16,144 | py | Python | pyfolio/utils.py | dimitar-petrov/pyfolio | 39758af5a34ab2ae3359b0e2c82675de0661dcc2 | [
"Apache-2.0"
] | 4,542 | 2015-07-14T02:34:19.000Z | 2022-03-31T02:12:06.000Z | pyfolio/utils.py | anojangra/pyfolio | 4b901f6d73aa02ceb6d04b7d83502e5c6f2e81aa | [
"Apache-2.0"
] | 558 | 2015-07-14T18:16:43.000Z | 2022-03-15T02:22:23.000Z | pyfolio/utils.py | anojangra/pyfolio | 4b901f6d73aa02ceb6d04b7d83502e5c6f2e81aa | [
"Apache-2.0"
] | 1,572 | 2015-07-15T23:06:09.000Z | 2022-03-31T17:54:33.000Z | #
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import warnings
from itertools import cycle
from matplotlib.pyplot import cm
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import empyrical.utils
from . import pos
from . import txn
APPROX_BDAYS_PER_MONTH = 21
APPROX_BDAYS_PER_YEAR = 252
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
MM_DISPLAY_UNIT = 1000000.
DAILY = 'daily'
WEEKLY = 'weekly'
MONTHLY = 'monthly'
YEARLY = 'yearly'
ANNUALIZATION_FACTORS = {
DAILY: APPROX_BDAYS_PER_YEAR,
WEEKLY: WEEKS_PER_YEAR,
MONTHLY: MONTHS_PER_YEAR
}
COLORMAP = 'Paired'
COLORS = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231',
'#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#fabebe',
'#008080', '#e6beff', '#aa6e28', '#800000', '#aaffc3',
'#808000', '#ffd8b1', '#000080', '#808080']
def one_dec_places(x, pos):
"""
Adds 1/10th decimal to plot ticks.
"""
return '%.1f' % x
def two_dec_places(x, pos):
"""
Adds 1/100th decimal to plot ticks.
"""
return '%.2f' % x
def percentage(x, pos):
"""
Adds percentage sign to plot ticks.
"""
return '%.0f%%' % x
def format_asset(asset):
"""
If zipline asset objects are used, we want to print them out prettily
within the tear sheet. This function should only be applied directly
before displaying.
"""
try:
import zipline.assets
except ImportError:
return asset
if isinstance(asset, zipline.assets.Asset):
return asset.symbol
else:
return asset
def vectorize(func):
"""
Decorator so that functions can be written to work on Series but
may still be called with DataFrames.
"""
def wrapper(df, *args, **kwargs):
if df.ndim == 1:
return func(df, *args, **kwargs)
elif df.ndim == 2:
return df.apply(func, *args, **kwargs)
return wrapper
def extract_rets_pos_txn_from_zipline(backtest):
"""
Extract returns, positions, transactions and leverage from the
backtest data structure returned by zipline.TradingAlgorithm.run().
The returned data structures are in a format compatible with the
rest of pyfolio and can be directly passed to
e.g. tears.create_full_tear_sheet().
Parameters
----------
backtest : pd.DataFrame
DataFrame returned by zipline.TradingAlgorithm.run()
Returns
-------
returns : pd.Series
Daily returns of strategy.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
Example (on the Quantopian research platform)
---------------------------------------------
>>> backtest = my_algo.run()
>>> returns, positions, transactions =
>>> pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
>>> pyfolio.tears.create_full_tear_sheet(returns,
>>> positions, transactions)
"""
backtest.index = backtest.index.normalize()
if backtest.index.tzinfo is None:
backtest.index = backtest.index.tz_localize('UTC')
returns = backtest.returns
raw_positions = []
for dt, pos_row in backtest.positions.iteritems():
df = pd.DataFrame(pos_row)
df.index = [dt] * len(df)
raw_positions.append(df)
if not raw_positions:
raise ValueError("The backtest does not have any positions.")
positions = pd.concat(raw_positions)
positions = pos.extract_pos(positions, backtest.ending_cash)
transactions = txn.make_transaction_frame(backtest.transactions)
if transactions.index.tzinfo is None:
transactions.index = transactions.index.tz_localize('utc')
return returns, positions, transactions
def print_table(table,
name=None,
float_format=None,
formatters=None,
header_rows=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pandas.Series or pandas.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
float_format : function, optional
Formatter to use for displaying table elements, passed as the
`float_format` arg to pd.Dataframe.to_html.
E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.
formatters : list or dict, optional
Formatters to use by column, passed as the `formatters` arg to
pd.Dataframe.to_html.
header_rows : dict, optional
Extra rows to display at the top of the table.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if name is not None:
table.columns.name = name
html = table.to_html(float_format=float_format, formatters=formatters)
if header_rows is not None:
# Count the number of columns for the text to span
n_cols = html.split('<thead>')[1].split('</thead>')[0].count('<th>')
# Generate the HTML for the extra rows
rows = ''
for name, value in header_rows.items():
rows += ('\n <tr style="text-align: right;"><th>%s</th>' +
'<td colspan=%d>%s</td></tr>') % (name, n_cols, value)
# Inject the new HTML
html = html.replace('<thead>', '<thead>' + rows)
display(HTML(html))
def standardize_data(x):
"""
Standardize an array with mean and standard deviation.
Parameters
----------
x : np.array
Array to standardize.
Returns
-------
np.array
Standardized array.
"""
return (x - np.mean(x)) / np.std(x)
def detect_intraday(positions, transactions, threshold=0.25):
"""
Attempt to detect an intraday strategy. Get the number of
positions held at the end of the day, and divide that by the
number of unique stocks transacted every day. If the average quotient
is below a threshold, then an intraday strategy is detected.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
boolean
True if an intraday strategy is detected.
"""
daily_txn = transactions.copy()
daily_txn.index = daily_txn.index.date
txn_count = daily_txn.groupby(level=0).symbol.nunique().sum()
daily_pos = positions.drop('cash', axis=1).replace(0, np.nan)
return daily_pos.count(axis=1).sum() / txn_count < threshold
def check_intraday(estimate, returns, positions, transactions):
"""
Logic for checking if a strategy is intraday and processing it.
Parameters
----------
estimate: boolean or str, optional
Approximate returns for intraday strategies.
See description in tears.create_full_tear_sheet.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, adjusted for intraday movement.
"""
if estimate == 'infer':
if positions is not None and transactions is not None:
if detect_intraday(positions, transactions):
warnings.warn('Detected intraday strategy; inferring positi' +
'ons from transactions. Set estimate_intraday' +
'=False to disable.')
return estimate_intraday(returns, positions, transactions)
else:
return positions
else:
return positions
elif estimate:
if positions is not None and transactions is not None:
return estimate_intraday(returns, positions, transactions)
else:
raise ValueError('Positions and txns needed to estimate intraday')
else:
return positions
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
"""
Intraday strategies will often not hold positions at the day end.
This attempts to find the point in the day that best represents
the activity of the strategy on that day, and effectively resamples
the end-of-day positions with the positions at this point of day.
The point of day is found by detecting when our exposure in the
market is at its maximum point. Note that this is an estimate.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, resampled for intraday behavior.
"""
# Construct DataFrame of transaction amounts
txn_val = transactions.copy()
txn_val.index.names = ['date']
txn_val['value'] = txn_val.amount * txn_val.price
txn_val = txn_val.reset_index().pivot_table(
index='date', values='value',
columns='symbol').replace(np.nan, 0)
# Cumulate transaction amounts each day
txn_val = txn_val.groupby(txn_val.index.date).cumsum()
# Calculate exposure, then take peak of exposure every day
txn_val['exposure'] = txn_val.abs().sum(axis=1)
condition = (txn_val['exposure'] == txn_val.groupby(
pd.Grouper(freq='24H'))['exposure'].transform(max))
txn_val = txn_val[condition].drop('exposure', axis=1)
# Compute cash delta
txn_val['cash'] = -txn_val.sum(axis=1)
# Shift EOD positions to positions at start of next trading day
positions_shifted = positions.copy().shift(1).fillna(0)
starting_capital = positions.iloc[0].sum() / (1 + returns[0])
positions_shifted.cash[0] = starting_capital
# Format and add start positions to intraday position changes
txn_val.index = txn_val.index.normalize()
corrected_positions = positions_shifted.add(txn_val, fill_value=0)
corrected_positions.index.name = 'period_close'
corrected_positions.columns.name = 'sid'
return corrected_positions
def clip_returns_to_benchmark(rets, benchmark_rets):
"""
Drop entries from rets so that the start and end dates of rets match those
of benchmark_rets.
Parameters
----------
rets : pd.Series
Daily returns of the strategy, noncumulative.
- See pf.tears.create_full_tear_sheet for more details
benchmark_rets : pd.Series
Daily returns of the benchmark, noncumulative.
Returns
-------
clipped_rets : pd.Series
Daily noncumulative returns with index clipped to match that of
benchmark returns.
"""
if (rets.index[0] < benchmark_rets.index[0]) \
or (rets.index[-1] > benchmark_rets.index[-1]):
clipped_rets = rets[benchmark_rets.index]
else:
clipped_rets = rets
return clipped_rets
def to_utc(df):
"""
For use in tests; applied UTC timestamp to DataFrame.
"""
try:
df.index = df.index.tz_localize('UTC')
except TypeError:
df.index = df.index.tz_convert('UTC')
return df
def to_series(df):
"""
For use in tests; converts DataFrame's first column to Series.
"""
return df[df.columns[0]]
# This functions is simply a passthrough to empyrical, but is
# required by the register_returns_func and get_symbol_rets.
default_returns_func = empyrical.utils.default_returns_func
# Settings dict to store functions/values that may
# need to be overridden depending on the users environment
SETTINGS = {
'returns_func': default_returns_func
}
def register_return_func(func):
"""
Registers the 'returns_func' that will be called for
retrieving returns data.
Parameters
----------
func : function
A function that returns a pandas Series of asset returns.
The signature of the function must be as follows
>>> func(symbol)
Where symbol is an asset identifier
Returns
-------
None
"""
SETTINGS['returns_func'] = func
def get_symbol_rets(symbol, start=None, end=None):
"""
Calls the currently registered 'returns_func'
Parameters
----------
symbol : object
An identifier for the asset whose return
series is desired.
e.g. ticker symbol or database ID
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pandas.Series
Returned by the current 'returns_func'
"""
return SETTINGS['returns_func'](symbol,
start=start,
end=end)
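# A small usage sketch of the hooks above (the fetcher below is a stand-in, not
# part of pyfolio):
#
#   def my_returns_func(symbol, start=None, end=None):
#       return load_my_price_db(symbol, start, end).pct_change().dropna()
#
#   register_return_func(my_returns_func)
#   spy_rets = get_symbol_rets('SPY')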
def configure_legend(ax, autofmt_xdate=True, change_colors=False,
rotation=30, ha='right'):
"""
Format legend for perf attribution plots:
- put legend to the right of plot instead of overlapping with it
- make legend order match up with graph lines
- set colors according to colormap
"""
chartBox = ax.get_position()
ax.set_position([chartBox.x0, chartBox.y0,
chartBox.width * 0.75, chartBox.height])
# make legend order match graph lines
handles, labels = ax.get_legend_handles_labels()
handles_and_labels_sorted = sorted(zip(handles, labels),
key=lambda x: x[0].get_ydata()[-1],
reverse=True)
handles_sorted = [h[0] for h in handles_and_labels_sorted]
labels_sorted = [h[1] for h in handles_and_labels_sorted]
if change_colors:
for handle, color in zip(handles_sorted,
cycle(COLORS)):
handle.set_color(color)
ax.legend(handles=handles_sorted,
labels=labels_sorted,
frameon=True,
framealpha=0.5,
loc='upper left',
bbox_to_anchor=(1.05, 1),
fontsize='small')
# manually rotate xticklabels instead of using matplotlib's autofmt_xdate
# because it disables xticklabels for all but the last plot
if autofmt_xdate:
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
def sample_colormap(cmap_name, n_samples):
"""
Sample a colormap from matplotlib
"""
colors = []
colormap = cm.cmap_d[cmap_name]
for i in np.linspace(0, 1, n_samples):
colors.append(colormap(i))
return colors
| 29.785978 | 78 | 0.647113 |
794542cb9d4c824ca75e1bd4a2063dfcad138eba | 5,572 | py | Python | homeassistant/components/rflink/cover.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/rflink/cover.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/rflink/cover.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 2 | 2020-04-19T13:35:24.000Z | 2020-04-19T13:35:51.000Z | """Support for Rflink Cover devices."""
import logging
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverDevice
from homeassistant.const import CONF_NAME, CONF_TYPE, STATE_OPEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import (
CONF_ALIASES,
CONF_DEVICE_DEFAULTS,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS,
DEVICE_DEFAULTS_SCHEMA,
RflinkCommand,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
TYPE_STANDARD = "standard"
TYPE_INVERTED = "inverted"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
): DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_DEVICES, default={}): vol.Schema(
{
cv.string: {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE): vol.Any(TYPE_STANDARD, TYPE_INVERTED),
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
}
}
),
}
)
def entity_type_for_device_id(device_id):
"""Return entity class for protocol of a given device_id.
Async friendly.
"""
entity_type_mapping = {
# KlikAanKlikUit cover have the controls inverted
"newkaku": TYPE_INVERTED
}
protocol = device_id.split("_")[0]
return entity_type_mapping.get(protocol, TYPE_STANDARD)
def entity_class_for_type(entity_type):
"""Translate entity type to entity class.
Async friendly.
"""
entity_device_mapping = {
# default cover implementation
TYPE_STANDARD: RflinkCover,
# cover with open/close commands inverted
# like KAKU/COCO ASUN-650
TYPE_INVERTED: InvertedRflinkCover,
}
return entity_device_mapping.get(entity_type, RflinkCover)
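# For example (the device id is illustrative): a KlikAanKlikUit cover id such as
# "newkaku_0000c6c2_1" maps to TYPE_INVERTED and thus InvertedRflinkCover, while
# an unknown protocol falls back to TYPE_STANDARD / RflinkCover:
#   entity_class_for_type(entity_type_for_device_id("newkaku_0000c6c2_1"))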
def devices_from_config(domain_config):
"""Parse configuration and add Rflink cover devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
# Determine what kind of entity to create, RflinkCover
# or InvertedRflinkCover
if CONF_TYPE in config:
# Remove type from config to not pass it as and argument
# to entity instantiation
entity_type = config.pop(CONF_TYPE)
else:
entity_type = entity_type_for_device_id(device_id)
entity_class = entity_class_for_type(entity_type)
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
device = entity_class(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink cover platform."""
async_add_entities(devices_from_config(config))
class RflinkCover(RflinkCommand, CoverDevice, RestoreEntity):
"""Rflink entity which can switch on/stop/off (eg: cover)."""
async def async_added_to_hass(self):
"""Restore RFLink cover state (OPEN/CLOSE)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_OPEN
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon", "up"]:
self._state = True
elif command in ["off", "alloff", "down"]:
self._state = False
@property
def should_poll(self):
"""No polling available in RFlink cover."""
return False
@property
def is_closed(self):
"""Return if the cover is closed."""
return not self._state
@property
def assumed_state(self):
"""Return True because covers can be stopped midway."""
return True
async def async_close_cover(self, **kwargs):
"""Turn the device close."""
await self._async_handle_command("close_cover")
async def async_open_cover(self, **kwargs):
"""Turn the device open."""
await self._async_handle_command("open_cover")
async def async_stop_cover(self, **kwargs):
"""Turn the device stop."""
await self._async_handle_command("stop_cover")
class InvertedRflinkCover(RflinkCover):
"""Rflink cover that has inverted open/close commands."""
async def _async_send_command(self, cmd, repetitions):
"""Will invert only the UP/DOWN commands."""
_LOGGER.debug("Getting command: %s for Rflink device: %s", cmd, self._device_id)
cmd_inv = {"UP": "DOWN", "DOWN": "UP"}
await super()._async_send_command(cmd_inv.get(cmd, cmd), repetitions)
| 32.395349 | 88 | 0.644113 |
794543e16813640adcddf6bd58e3d62edf26d08a | 1,036 | py | Python | src/byro/common/migrations/0007_auto_20180224_2114.py | mv-idatalytics/jenkins-byro | 1be7f30a8a8c40adf93ea099e9d85cd28fc19783 | [
"Apache-2.0"
] | null | null | null | src/byro/common/migrations/0007_auto_20180224_2114.py | mv-idatalytics/jenkins-byro | 1be7f30a8a8c40adf93ea099e9d85cd28fc19783 | [
"Apache-2.0"
] | null | null | null | src/byro/common/migrations/0007_auto_20180224_2114.py | mv-idatalytics/jenkins-byro | 1be7f30a8a8c40adf93ea099e9d85cd28fc19783 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 1.11.8 on 2018-02-24 21:14
from django.db import migrations
def init_templates(apps, schema_editor):
from byro.mails import default
MailTemplate = apps.get_model("mails", "MailTemplate")
Configuration = apps.get_model("common", "Configuration")
config, _ = Configuration.objects.get_or_create()
if not config.welcome_member_template:
welcome_member = MailTemplate.objects.create(
subject=default.WELCOME_MEMBER_SUBJECT, text=default.WELCOME_MEMBER_TEXT
)
config.welcome_member_template = welcome_member
if not config.welcome_office_template:
welcome_office = MailTemplate.objects.create(
subject=default.WELCOME_OFFICE_SUBJECT, text=default.WELCOME_OFFICE_TEXT
)
config.welcome_office_template = welcome_office
config.save()
class Migration(migrations.Migration):
dependencies = [("common", "0006_auto_20180224_2114")]
operations = [migrations.RunPython(init_templates, migrations.RunPython.noop)]
| 34.533333 | 84 | 0.737452 |
7945445e5f3173f3895b6dc9bf88d760f37fe19b | 11,313 | py | Python | Jumpscale/tools/configmanager/DbConfig.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | null | null | null | Jumpscale/tools/configmanager/DbConfig.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | 162 | 2018-07-31T14:40:33.000Z | 2019-04-03T06:31:20.000Z | Jumpscale/tools/configmanager/DbConfig.py | threefoldtech/jumpscale_core9 | f366bea61bf2e6112c4afb5a4979ec00b843e58a | [
"Apache-2.0"
] | 2 | 2018-07-31T12:42:02.000Z | 2018-11-21T09:40:22.000Z | from jumpscale import j
import re
import json
from inspect import getmodule
JSBASE = j.application.jsbase_get_class()
HSET_NAME = "cfg::{configrepo}::{instance}::{clientpath}"
def get_key_info(k):
m = re.match("cfg::(?P<configrepo>\w+)::(?P<instance>.+)::(?P<clientpath>.+)", k)
if m:
return m.groupdict()
return None
def mk_hsetname(configrepo="myconfig", instance="main", clientpath=""):
return HSET_NAME.format(configrepo=configrepo, instance=instance, clientpath=clientpath)
def hset(client, hsetname, key, val):
nsclient = client.namespace
data_str = nsclient.get(hsetname)
if isinstance(data_str, bytes):
data_str = data_str.decode()
data = {}
try:
data = json.loads(data_str)
except Exception as e:
pass
data[key] = val
data_str = json.dumps(data)
nsclient.set(data_str, hsetname)
return True
def hsetmany(client, hsetname, **kwargs):
nsclient = client.namespace
data_str = nsclient.get(hsetname)
if isinstance(data_str, bytes):
data_str = data_str.decode()
data = {}
try:
data = json.loads(data_str)
except Exception as e:
pass
for k, v in kwargs.items():
data[k] = v
data_str = json.dumps(data)
nsclient.set(data_str, hsetname)
return True
def hget(client, hsetname, key):
try:
return hget_all(client, hsetname)[key]
except:
return {}
def hget_all(client, hsetname):
nsclient = client.namespace
data_str = nsclient.get(hsetname)
if isinstance(data_str, bytes):
data_str = data_str.decode()
try:
data = json.loads(data_str)
return data
except:
return {}
def hdel(client, hsetname):
nsclient = client.namespace
nsclient.delete(hsetname)
def hdel_key(client, hsetname, key):
d = hget_all(client, hsetname)
try:
del d[key]
except:
return True
else:
hsetmany(client, hsetname, **d)
def iselect_all(client, pattern=None):
nsclient = client.namespace
result = []
def do(arg, result):
if pattern:
try:
arg = arg.decode()
except Exception as e:
print(e)
else:
if re.match(pattern, arg):
result.append(arg)
else:
result.append(arg)
return result
nsclient.iterate(do, key_start=None, direction="forward", nrrecords=100000, _keyonly=True, result=result)
return result
def find_configs(client):
configs = []
for el in iselect_all(client):
if el.startswith("cfg::"): # USE re.match better
configs.append(el)
return configs
def template_from_object(obj):
module = None
if hasattr(obj, "_child_class"):
obj._child_class
try:
module = getmodule(obj._child_class)
except Exception as e:
if "cannot import name" in str(e):
raise RuntimeError("cannot find TEMPLATE in %s, please call the template: TEMPLATE" %
obj._child_class.__module__)
raise e
else:
try:
module = getmodule(obj)
except Exception as e:
if "cannot import name" in str(e):
raise RuntimeError("cannot find TEMPLATE in %s, please call the template: TEMPLATE" % obj.__module__)
raise e
return module.TEMPLATE
class DbConfig(JSBASE):
def __init__(self, instance="main", location=None, template=None, data={}):
"""
jsclient_object is e.g. j.clients.packet.net
"""
JSBASE.__init__(self)
self._zdbsimplecl = None
if j.core.state.configGetFromDict("myconfig", "backend", "file") == "db":
backend_addr = j.core.state.configGetFromDict("myconfig", "backend_addr", "localhost:9900")
adminsecret = j.core.state.configGetFromDict("myconfig", "adminsecret", "")
secrets = j.core.state.configGetFromDict("myconfig", "secrets", "")
namespace = j.tools.configmanager.namespace
if ":" in backend_addr:
host, port = backend_addr.split(":")
if port.isdigit():
port = int(port)
else:
raise RuntimeError("port is expected to be a number, but got {}".format(port))
self._zdbsimplecl = j.clients.zdbsimple.get(host, port, adminsecret, secrets, namespace)
else:
raise RuntimeError("can't create DbConfig with file backend.")
data = data or {}
self.location = j.data.text.toStr(location)
self.instance = j.data.text.toStr(instance)
self.hsetname = mk_hsetname(configrepo="myconfig", instance=self.instance, clientpath=self.location)
self._path = self.location
self.error = False # if this is true then need to call the configure part
self._template = template
if not j.data.types.string.check(template):
if template is not None:
raise RuntimeError("template needs to be None or string:%s" % template)
if self.instance is None:
raise RuntimeError("instance cannot be None")
self.reset()
self.load()
if not self._zdbsimplecl.namespace.get(self.hsetname):
self.new = True
# this is data on disk, because exists, should already apply to template
# without decryption so needs to go to self._data
dataOnFS = self.data # now decrypt
# make sure template has been applied !
data2, error = j.data.serializer.toml.merge(tomlsource=self.template, tomlupdate=dataOnFS, listunique=True)
if data != {}:
# update with data given
data, error = j.data.serializer.toml.merge(tomlsource=data2, tomlupdate=data, listunique=True)
self.data = data
else:
# now put the data into the object (encryption)
self.data = data2
# do the fancydump to make sure we really look at differences
if j.data.serializer.toml.fancydumps(self.data) != j.data.serializer.toml.fancydumps(dataOnFS):
self.logger.debug("change of data in config, need to save")
self.save()
def reset(self):
self._data = {}
self.loaded = False
self._path = None
self._nacl = None
self.new = False
@property
def path(self):
self.logger.debug("init getpath:%s" % self._path)
if not self._path:
# j.sal.fs.joinPaths(j.data.text.toStr(j.tools.configmanager.path), self.location, self.instance + '.toml')
self._path = self.location
self.logger.debug("getpath:%s" % self._path)
return self._path
@property
def nacl(self):
if not self._nacl:
if j.tools.configmanager.keyname:
self._nacl = j.data.nacl.get(sshkeyname=j.tools.configmanager.keyname)
else:
self._nacl = j.data.nacl.get()
return self._nacl
def instance_set(self, instance):
"""
will change instance name & delete data
"""
self.instance = instance
self.load(reset=True)
def load(self, reset=False):
"""
@RETURN if 1 means did not find the toml file so is new
"""
# if not reset or self._data == {}:
# TODO: assert the hset exists.
bdata = hget_all(self._zdbsimplecl, self.hsetname)
for k, v in bdata.items():
if isinstance(k, bytes):
k = k.decode()
if isinstance(v, bytes):
v = v.decode()
self._data[k] = v
for key, val in self.template.items():
ttype = j.data.types.type_detect(self.template[key])
if ttype.BASETYPE == "string":
if key.encode() in self._data:
self._data[key.encode()] = self._data[key.encode()].strip()
def save(self):
hsetmany(self._zdbsimplecl, self.hsetname, **self._data)
def delete(self):
hdel(self._zdbsimplecl, self.hsetname)
@property
def template(self):
if self._template is None or self._template == '':
obj = eval(self.location)
self._template = template_from_object(obj)
if j.data.types.string.check(self._template):
try:
self._template = j.data.serializer.toml.loads(self._template)
except Exception as e:
if "deserialization failed" in str(e):
raise RuntimeError("config file:%s is not valid toml.\n%s" % (self.path, self._template))
raise e
return self._template
@property
def data(self):
res = {}
if self._data == {}:
self.load()
for key, item in self._data.items():
if isinstance(key, bytes):
key = key.decode()
if key not in self.template:
self.logger.warning("could not find key:%s in template, while it was in instance:%s" % (key, self.path))
self.logger.debug("template was:%s\n" % self.template)
self.error = True
else:
ttype = j.data.types.type_detect(self.template[key])
if key.endswith("_"):
if ttype.BASETYPE == "string":
if item != '' and item != '""':
res[key] = self.nacl.decryptSymmetric(item, hex=True).decode()
else:
res[key] = ''
else:
res[key] = item
else:
res[key] = item
return res
@data.setter
def data(self, value):
if j.data.types.dict.check(value) is False:
raise TypeError("value needs to be dict")
changed = False
for key, item in value.items():
ch1 = self.data_set(key, item, save=False)
changed = changed or ch1
if changed:
# raise RuntimeError()
self.logger.debug("changed:\n%s" % self)
self.save()
def data_set(self, key, val, save=True):
if key not in self.template:
raise RuntimeError(
"Cannot find key:%s in template for %s" % (key, self))
if key not in self._data or self._data[key] != val:
ttype = j.data.types.type_detect(self.template[key])
if key.endswith("_"):
if ttype.BASETYPE == "string":
if val != '' and val != '""':
val = self.nacl.encryptSymmetric(val, hex=True, salt=val)
if key in self._data and val == self._data[key]:
return False
self._data[key] = val
if save:
self.save()
return True
else:
return False
@property
def yaml(self):
return j.data.serializer.toml.fancydumps(self._data)
def __str__(self):
out = "config:%s:%s\n\n" % (self.location, self.instance)
out += j.data.text.indent(self.yaml)
return out
__repr__ = __str__
| 32.696532 | 120 | 0.566516 |
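DbConfig above stores each client instance as a single JSON blob in a 0-db namespace under a key of the form cfg::<configrepo>::<instance>::<clientpath>, and the module-level helpers (hset, hget_all, hdel, get_key_info) handle the merge, parse and delete steps. The self-contained sketch below mirrors that storage scheme against an in-memory stand-in for the client.namespace interface; it is an illustration under stated assumptions, not Jumpscale code, and the FakeNamespace class and the example key are invented.

import json
import re


# Minimal in-memory stand-in for the zdb namespace client that hset()/hget_all() use;
# note that set() takes the value before the key, matching nsclient.set() above.
class FakeNamespace:
    def __init__(self):
        self._store = {}

    def get(self, name):
        return self._store.get(name)

    def set(self, data_str, name):
        self._store[name] = data_str

    def delete(self, name):
        self._store.pop(name, None)


if __name__ == "__main__":
    ns = FakeNamespace()
    key = "cfg::myconfig::main::j.clients.packetnet"

    # hset-style update: read the JSON blob, merge one field, write it back.
    current = json.loads(ns.get(key) or "{}")
    current["project_name"] = "demo"
    ns.set(json.dumps(current), key)

    # hget_all-style read of the whole blob.
    print(json.loads(ns.get(key)))

    # get_key_info-style parse of the key layout.
    m = re.match(r"cfg::(?P<configrepo>\w+)::(?P<instance>.+)::(?P<clientpath>.+)", key)
    print(m.groupdict())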
7945447624fcf35240819f3d84244ac56d1d3804 | 2,086 | py | Python | src/azure-cli-core/azure/cli/core/extension/_homebrew_patch.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | [
"MIT"
] | 1 | 2020-12-14T15:30:11.000Z | 2020-12-14T15:30:11.000Z | src/azure-cli-core/azure/cli/core/extension/_homebrew_patch.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | [
"MIT"
] | 4 | 2018-08-08T20:01:17.000Z | 2018-09-17T15:20:06.000Z | src/azure-cli-core/azure/cli/core/extension/_homebrew_patch.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | [
"MIT"
] | 1 | 2020-12-22T00:28:33.000Z | 2020-12-22T00:28:33.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import sys
from knack.log import get_logger
logger = get_logger(__name__)
HOMEBREW_CELLAR_PATH = '/usr/local/Cellar/azure-cli/'
def is_homebrew():
return any((p.startswith(HOMEBREW_CELLAR_PATH) for p in sys.path))
# A workaround for https://github.com/Azure/azure-cli/issues/4428
class HomebrewPipPatch(object): # pylint: disable=too-few-public-methods
CFG_FILE = os.path.expanduser(os.path.join('~', '.pydistutils.cfg'))
def __init__(self):
self.our_cfg_file = False
def __enter__(self):
if not is_homebrew():
return
if os.path.isfile(HomebrewPipPatch.CFG_FILE):
logger.debug("Homebrew patch: The file %s already exists and we will not overwrite it. "
"If extension installation fails, temporarily rename this file and try again.",
HomebrewPipPatch.CFG_FILE)
logger.warning("Unable to apply Homebrew patch for extension installation. "
"Attempting to continue anyway...")
self.our_cfg_file = False
else:
logger.debug("Homebrew patch: Temporarily creating %s to support extension installation on Homebrew.",
HomebrewPipPatch.CFG_FILE)
with open(HomebrewPipPatch.CFG_FILE, "w") as f:
f.write("[install]\nprefix=")
self.our_cfg_file = True
def __exit__(self, exc_type, exc_value, tb):
if not is_homebrew():
return
if self.our_cfg_file and os.path.isfile(HomebrewPipPatch.CFG_FILE):
logger.debug("Homebrew patch: Deleting the temporarily created %s", HomebrewPipPatch.CFG_FILE)
os.remove(HomebrewPipPatch.CFG_FILE)
| 40.901961 | 114 | 0.598274 |
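HomebrewPipPatch above temporarily writes ~/.pydistutils.cfg with an empty install prefix so that pip-driven extension installs also work when the CLI was installed through Homebrew, and it backs off if the file already exists. The sketch below is a hedged illustration of how a caller might wrap a pip invocation in that context manager; the pip_install_extension helper, its arguments and the install_extension wrapper are assumptions, not azure-cli's actual extension code.

import subprocess
import sys

# Assumes the module shown above is importable from an installed azure-cli-core.
from azure.cli.core.extension._homebrew_patch import HomebrewPipPatch


def pip_install_extension(wheel_path, target_dir):
    # Hypothetical helper: install an extension wheel into its target directory.
    return subprocess.call(
        [sys.executable, "-m", "pip", "install", "--target", target_dir, wheel_path]
    )


def install_extension(wheel_path, target_dir):
    # On a Homebrew install the temporary ~/.pydistutils.cfg written by __enter__
    # clears the distutils prefix that would otherwise conflict with pip's --target;
    # everywhere else the context manager is effectively a no-op.
    with HomebrewPipPatch():
        return pip_install_extension(wheel_path, target_dir)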