const { CaseExternalIncomplete } = require('./CaseExternalIncomplete');
const { ContactFactory } = require('../contacts/ContactFactory');
describe('CaseExternalIncomplete entity', () => {
describe('isValid', () => {
it('assigns a new irsNoticeDate if one is not passed in', () => {
const caseExternalIncomplete = new CaseExternalIncomplete({
caseType: 'Other',
contactPrimary: {
address1: '99 South Oak Lane',
address2: 'Culpa numquam saepe ',
address3: 'Eaque voluptates com',
city: 'Dignissimos voluptat',
countryType: 'domestic',
email: '[email protected]',
name: 'Priscilla Kline',
phone: '+1 (215) 128-6587',
postalCode: '69580',
state: 'AR',
},
contactSecondary: {},
filingType: 'Myself',
hasIrsNotice: false,
irsNoticeDate: null,
partyType: ContactFactory.PARTY_TYPES.petitioner,
petitionFileId: '102e29fa-bb8c-43ff-b18f-ddce9089dd80',
preferredTrialCity: 'Chattanooga, Tennessee',
procedureType: 'Small',
});
expect(caseExternalIncomplete.getFormattedValidationErrors()).toEqual(
null,
);
});
});
});
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "band"
app_title = "Band"
app_publisher = "Ahmad Ragheb"
app_description = "Band website"
app_icon = "red"
app_color = "red"
app_email = "[email protected]"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/band/css/band.css"
# app_include_js = "/assets/band/js/band.js"
# include js, css files in header of web template
# web_include_css = "/assets/band/css/band.css"
# web_include_js = "/assets/band/js/band.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
home_page = "home"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "band.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "band.install.before_install"
# after_install = "band.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "band.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "band.tasks.all"
# ],
# "daily": [
# "band.tasks.daily"
# ],
# "hourly": [
# "band.tasks.hourly"
# ],
# "weekly": [
# "band.tasks.weekly"
# ],
# "monthly": [
# "band.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "band.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "band.event.get_events"
# }
|
import os
import django_heroku
import dj_database_url
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# Application definition
INSTALLED_APPS = [
'instagram.apps.InstagramConfig',
'bootstrap4',
'pyuploadcare.dj',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
UPLOADCARE = {
'pub_key': 'aeed84efde73eba99ff6',
'secret': '1110614ad7e895d38955',
}
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# development
if MODE == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
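# Illustrative .env sketch (assumption: the values below are placeholders, not
# taken from the original project). python-decouple reads these keys, which are
# exactly the ones referenced above via config():
#
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=insta
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   DATABASE_URL=postgres://user:password@host:5432/dbname
#   ALLOWED_HOSTS=localhost,127.0.0.1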
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals())
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
|
/*
* Author: CM
* Custom CM rules for Modernizr.
*/
(function() {
Modernizr.addTest('inputinfixed', function() {
return !navigator.userAgent.match(/(iPad|iPhone|iPod)/i);
});
Modernizr.addTest('webvr', function() {
return 'getVRDisplays' in navigator;
});
/*
* test based on THREE.js method, see: http://threejs.org/examples/js/Detector.js
*/
Modernizr.addTest('webgl', function() {
try {
var canvas = document.createElement('canvas');
return !!( window.WebGLRenderingContext && ( canvas.getContext('webgl') || canvas.getContext('experimental-webgl')));
} catch (e) {
return false;
}
});
})();
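// Usage sketch (illustrative, not part of the original file): once registered,
// each test result is exposed as a boolean property on the global Modernizr
// object, e.g.:
//
//   if (Modernizr.webgl && Modernizr.webvr) {
//       // safe to initialize a WebVR renderer
//   }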
|
#ifndef LEXER_H
#define LEXER_H
#include <fstream>
#include <iomanip>
#include <set>
#include <string>
#include <vector>
#include "Token.h"
class Lexer {
public:
Lexer();
void feed(string line);
void clearResult();
vector<vector<Token> > getResult();
private:
const static set<string> KEY_WORDS;
const static set<string> OPERATORS;
const static set<string> SPECIAL_SYMBOLS;
const static set<string> COMMENT;
const static string CHAR_IDENTIFIER;
const static string ESCAPE_SYSBOL;
const static string DOT_POINT;
vector<vector<Token> > result;
    // handles keywords, numbers and identifiers
Token handleWord(string st);
bool isIdentifierFirstElement(string ch);
bool isIdentifierElement(string ch);
bool isUpperAlphabet(string ch);
bool isUpperAlphabet(char c);
bool isLowerAlphabet(string ch);
bool isLowerAlphabet(char c);
bool isAlphabet(string ch);
bool isAlphabet(char c);
bool isDigit(string ch);
bool isDigit(char c);
bool isEmptyChacter(string ch);
bool isEmptyChacter(char c);
template <class T>
bool contain(set<T> container, T content);
};
#endif
|
#include <stdio.h>
#include "tokenizer.h"
#include "value.h"
#include "linkedlist.h"
#include "parser.h"
#include "talloc.h"
int main()
{
Value *tokensList = tokenize();
Value *parseTree = parse(tokensList);
printTree(parseTree);
printf("\n");
tfree();
return 0;
}
|
#!/usr/bin/env python3.4
"""
Simple doctest:
>>> fizzbuzz("sample.txt")
1 2 F 4 B F 7 8 F B
1 F 3 F 5 F B F 9 F 11 F 13 FB 15
"""
def fizzbuzz(filename):
with open(filename) as file:
for line in file:
print(*fizzline(*[int(x) for x in line.split()]))
"""Takes 3 numbers, x, y and, and prints out a list from
1...n inclusive with numbers that divide by x replaced by
F, and numbers that divide by y with B, and numbers that
divide by x and y by FB.
Eg:
>>> fizzline(3, 5, 10)
1 2 F 4 B F 7 8 F B
"""
def fizzline(x, y, n):
ret = []
for i in range(1,n+1):
if not i % x:
if not i % y:
ret.append('FB')
else:
ret.append('F')
elif not i % y:
ret.append('B')
else:
ret.append(i)
return ret
def usage():
print("""fizzbuzz:
Test:
python3 -m doctest -v fizzbuzz.py
Run:
python3 fizzbuzz.py sample.txt
""")
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
usage()
else:
fizzbuzz(sys.argv[1])
|
var path = require("path");
/**
* GET /login/sessionCookieRedirect?checkAccountSetupComplete=true&token=20111h0ZlxjZM6hdu56L3U2VQSojaXPTA6uZViUr5wIOT7Ik22ZirK_&redirectUrl=http://127.0.0.1:7777/oauth2/v1/authorize/redirect?okta_key=ONHYUjtp7FHAP358vNnHFgbRFNVpCEruGRtdiffwbdA
*
* host: rain.okta1.com:1802
* connection: keep-alive
* user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0
* accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,* / *;q=0.8
* accept-encoding: gzip
* accept-language: en-US
* cookie: JSESSIONID=B74C8FEC5F7D905A21E7988ACBC53A39; DT=DI0sFErlXCyRx-1NKHQV9y5pw
* cache-control: no-cache, no-store
* pragma: no-cache
*/
module.exports = function (req, res) {
res.statusCode = 302;
res.setHeader("server", "Apache-Coyote/1.1");
res.setHeader("x-okta-request-id", "reqTIwV48wBTGWmCs0megWnYQ");
res.setHeader("p3p", "CP=\"HONK\"");
res.setHeader("set-cookie", ["sid=\"\"; Expires=Thu, 01-Jan-1970 00:00:10 GMT; Path=/","t=default; Path=/","sid=102TyF_klyhSbim6RL_GTmW5w;Version=1;Path=/;HttpOnly","proximity_6d91fdca6790f698a3af81622d4abc60=\"ggfOlTE5YKQd/xAzn4tjgBQfOerFAXw+2+UMDOqTAzARtaIbF/5qbKvTFvjVPAMQrNEuS7ZqPTtJoCFGmqQjyIllL48jwfBjvxC/TeU++BeNUJhC2jTjlVnORgs9Dh3mYGcBXE5qfZ194SZztU1giMTcGiu6oKiyNrWdOe5FpdjK4G6h7qCyd5NjdYTLQa2U\"; Version=1; Max-Age=31536000; Expires=Tue, 23-Oct-2018 18:59:36 GMT; Path=/","JSESSIONID=B74C8FEC5F7D905A21E7988ACBC53A39; Path=/"]);
res.setHeader("x-rate-limit-limit", "10000");
res.setHeader("x-rate-limit-remaining", "9996");
res.setHeader("x-rate-limit-reset", "1508785218");
res.setHeader("x-okta-backend", "albatross");
res.setHeader("cache-control", "no-cache, no-store");
res.setHeader("pragma", "no-cache");
res.setHeader("expires", "0");
res.setHeader("x-frame-options", "SAMEORIGIN");
res.setHeader("location", "http://127.0.0.1:7777/oauth2/v1/authorize/redirect?okta_key=ONHYUjtp7FHAP358vNnHFgbRFNVpCEruGRtdiffwbdA");
res.setHeader("content-language", "en");
res.setHeader("content-length", "0");
res.setHeader("date", "Mon, 23 Oct 2017 18:59:36 GMT");
res.setHeader("x-yakbak-tape", path.basename(__filename, ".js"));
res.end();
return __filename;
};
|
# +
import bq
from defaults import DEFAULT_DATASETS
import render
COMBINED = DEFAULT_DATASETS.latest.combined
DEID = DEFAULT_DATASETS.latest.deid
print("""COMBINED = {COMBINED}
DEID = {DEID}""".format(COMBINED=COMBINED, DEID=DEID))
# -
# ## Row counts in combined `_mapping*` and deid `*_ext` tables
ROW_COUNTS_QUERY = """
SELECT dataset_id,
REPLACE(REPLACE(table_id, '_mapping_', ''), '_ext', '') mapped_table,
table_id,
creation_time,
last_modified_time,
row_count
FROM
(SELECT *
FROM {DEID}.__TABLES__
WHERE table_id LIKE '%\\\_ext'
UNION ALL
SELECT *
FROM {COMBINED}.__TABLES__ d1
WHERE table_id LIKE '\\\_mapping\\\_%')
ORDER BY REPLACE(REPLACE(table_id, '_mapping_', ''), '_ext', ''), dataset_id
"""
q = ROW_COUNTS_QUERY.format(COMBINED=COMBINED, DEID=DEID)
row_counts_df = bq.query(q)
render.dataframe(row_counts_df)
# ## Side by side comparison of row counts
compare_df = row_counts_df.pivot(index='mapped_table', columns='dataset_id', values='row_count')
render.dataframe(compare_df)
# ## Row count differences
# The combined mapping tables and deid ext tables are expected to have the same number of rows. Below we find where the row counts differ.
query_str = '{DEID} <> {COMBINED}'.format(COMBINED=COMBINED, DEID=DEID)
diff_row_counts_df = compare_df.query(query_str)
render.dataframe(diff_row_counts_df)
|
/*
 * This module implements packet encoding and parsing for the Gearman protocol.
* {@see http://gearman.org/protocol}
*
* (C) 2013 Vaclav Sykora
* Apache License, Version 2.0, http://www.apache.org/licenses/
*
*/
var winston = require('winston'),
util = require('util');
// various constants used in protocol
var constants = exports.CONSTANTS = {
TYPE_REQ: 1,
TYPE_RESP: 2,
// --
HEADER_REQ: 0x00524551,
HEADER_RESP: 0x00524553,
HEADER_LEN: 12,
// --
UNKNOWN_OPTION: 'UNKNOWN_OPTION'
};
var logger = winston.loggers.get('protocol');
/** @enum */
exports.DEFINITION = {
CAN_DO: [1, constants.TYPE_REQ], // W->J: FUNC
CANT_DO: [2, constants.TYPE_REQ], // W->J: FUNC
    RESET_ABILITIES: [3, constants.TYPE_REQ], // W->J: --
PRE_SLEEP: [4, constants.TYPE_REQ], // W->J: --
// 5 (unused)
NOOP: [6, constants.TYPE_RESP, ''], // J->W: --
SUBMIT_JOB: [7, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
JOB_CREATED: [8, constants.TYPE_RESP, 'L'], // J->C: HANDLE
GRAB_JOB: [9, constants.TYPE_REQ], // W->J: --
NO_JOB: [10, constants.TYPE_RESP, ''], // J->W: --
JOB_ASSIGN: [11, constants.TYPE_RESP, 'nnb'], // J->W: HANDLE[0]FUNC[0]ARG
WORK_STATUS: [12, constants.TYPE_REQ | constants.TYPE_RESP, 'nnL'], // W->J/C: HANDLE[0]NUMERATOR[0]DENOMINATOR
WORK_COMPLETE: [13, constants.TYPE_REQ | constants.TYPE_RESP, 'nb'], // W->J/C: HANDLE[0]RES
WORK_FAIL: [14, constants.TYPE_REQ | constants.TYPE_RESP, 'L'], // W->J/C: HANDLE
GET_STATUS: [15, constants.TYPE_REQ], // C->J: HANDLE
ECHO_REQ: [16, constants.TYPE_REQ], // C/W->J: TEXT
ECHO_RES: [17, constants.TYPE_RESP, 'L'], // J->C/W: TEXT
SUBMIT_JOB_BG: [18, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
ERROR: [19, constants.TYPE_RESP, 'nL'], // J->C/W: ERRCODE[0]ERR_TEXT
STATUS_RES: [20, constants.TYPE_RESP, 'nnnnL'], // C->J: HANDLE[0]KNOWN[0]RUNNING[0]NUM[0]DENOM
SUBMIT_JOB_HIGH: [21, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
SET_CLIENT_ID: [22, constants.TYPE_REQ], // W->J: STRING_NO_WHITESPACE
CAN_DO_TIMEOUT: [23, constants.TYPE_REQ], // W->J: FUNC[0]TIMEOUT
// 24 ALL_YOURS (Not yet implemented)
WORK_EXCEPTION: [25, constants.TYPE_REQ | constants.TYPE_RESP, 'nb'], // W->J/C: HANDLE[0]ARG
OPTION_REQ: [26, constants.TYPE_REQ], // C->J: TEXT
OPTION_RES: [27, constants.TYPE_RESP, 'L'], // J->C: TEXT
WORK_DATA: [28, constants.TYPE_REQ | constants.TYPE_RESP, 'nb'], // W->J/C: HANDLE[0]RES
WORK_WARNING: [29, constants.TYPE_REQ | constants.TYPE_RESP, 'nb'], // W->J/C: HANDLE[0]MSG
GRAB_JOB_UNIQ: [30, constants.TYPE_REQ], // W->J: --
JOB_ASSIGN_UNIQ: [31, constants.TYPE_RESP, 'nnnb'], // J->W: HANDLE[0]FUNC[0]ARG
SUBMIT_JOB_HIGH_BG: [32, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
SUBMIT_JOB_LOW: [33, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
SUBMIT_JOB_LOW_BG: [34, constants.TYPE_REQ], // C->J: FUNC[0]UNIQ[0]ARGS
};
// desc=>code - {CAN_DO: 1}
exports.PACKET_TYPES = {};
// code=>desc - {1: CAN_DO}
exports.PACKET_CODES = {};
// code=>format for RESP - {19: 'nL'}
exports.PACKET_RESP_FORMAT = {};
var def;
for (var i in exports.DEFINITION) {
def = exports.DEFINITION[i];
exports.PACKET_TYPES[i] = def[0];
exports.PACKET_CODES[def[0]] = i;
if (constants.TYPE_RESP === def[1]) {
exports.PACKET_RESP_FORMAT[def[0]] = def[2];
}
}
/**
* Parses given buffer according to defined format.
*
* *format*
* *N* NULL byte terminated string (default encoding)
* *n* NULL byte terminated string (ASCII encoding)
* *L* last segment of buffer (default encoding)
* *b* last segment of buffer (as Buffer)
*
* return array
* *rslt[0]* number of processed bytes
* *rslt[1..]* chunks with order defined by format
*/
exports.parsePacket = function (buff, format) {
var i, j, key;
var rslt = [];
var offset = constants.HEADER_LEN;
var packetType = buff.readUInt32BE(4);
var packetLength = offset + buff.readUInt32BE(8);
format = format || '';
for (i = 0; i < format.length; i ++) {
key = format.charAt(i);
if ('N' == key || 'n' == key) {
// find next NULL
for (j = offset; j < buff.length; j ++) {
if (buff[j] == 0) {
break;
}
}
rslt[i + 1] = buff.toString('n' == key ? 'ascii' : undefined, offset, j);
offset = j + 1; // +1 == skip NULL
} else if ('L' == key) { // LAST segment up to packetLength as String
rslt[i + 1] = buff.toString(undefined, offset, packetLength);
offset = packetLength;
} else if ('b' == key) { // LAST segment up to packetLength as Buffer
rslt[i + 1] = buff.slice(offset);
offset = packetLength;
} else {
            return new Error('unknown format: ' + format);
}
}
rslt[0] = offset;
if (logger.isLevelEnabled('verbose')) {
logger.log('verbose', 'packet parsed, bytes=%d, type=%s', offset, exports.PACKET_CODES[packetType]);
}
return rslt;
}
exports.encodePacket = function (packetType, args) {
var i, buff;
var packetLength = 0;
var offset = constants.HEADER_LEN;
// default values
args = args || [];
// compute packet length
for (i = 0; i < args.length; i ++) {
if (args[i].constructor.name === 'String') {
packetLength += Buffer.byteLength(args[i]);
} else if (args[i].constructor.name === 'Buffer') {
packetLength += args[i].length;
} else {
packetLength += Buffer.byteLength(args[i].toString());
}
}
if (args.length > 0) {
packetLength += args.length - 1; // NULL byte terminations
}
    buff = Buffer.alloc(constants.HEADER_LEN + packetLength);
buff.writeUInt32BE(constants.HEADER_REQ, 0); // \0REQ
buff.writeUInt32BE(packetType, 4);
buff.writeUInt32BE(packetLength, 8);
// construct packet
for (i = 0; i < args.length; i ++) {
if (args[i].constructor.name === 'String') {
buff.write(args[i], offset);
offset += Buffer.byteLength(args[i]);
} else if (args[i].constructor.name === 'Buffer') {
args[i].copy(buff, offset);
offset += args[i].length;
} else {
buff.write(args[i].toString(), offset);
offset += Buffer.byteLength(args[i].toString());
}
if (i < (args.length - 1)) {
buff.writeUInt8(0, offset); // NULL byte terminated chunk
offset ++;
}
}
logger.log('debug', 'packet encoded, type=%s, buffer.size=%d',
exports.PACKET_CODES[packetType], packetLength + constants.HEADER_LEN);
return buff;
}
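// Minimal usage sketch (illustrative, not part of the original module): a
// round trip through encodePacket() and parsePacket() for an ECHO_REQ packet,
// whose body is a single text segment ('L' format). Runs only when this file
// is executed directly, so requiring the module stays side-effect free.
if (require.main === module) {
    var echoBuff = exports.encodePacket(exports.PACKET_TYPES.ECHO_REQ, ['hello']);
    var parsed = exports.parsePacket(echoBuff, 'L');
    console.log('consumed %d bytes, text=%s', parsed[0], parsed[1]);
}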
|
import { pluginFactory } from '../utils/plugins';
// Component group plugins
import { AlertPlugin } from './alert';
import { BadgePlugin } from './badge';
import { BreadcrumbPlugin } from './breadcrumb';
import { ButtonPlugin } from './button';
import { ButtonGroupPlugin } from './button-group';
import { ButtonToolbarPlugin } from './button-toolbar';
import { CalendarPlugin } from './calendar';
import { CardPlugin } from './card';
import { CarouselPlugin } from './carousel';
import { CollapsePlugin } from './collapse';
import { DropdownPlugin } from './dropdown';
import { EmbedPlugin } from './embed';
import { FormPlugin } from './form';
import { FormCheckboxPlugin } from './form-checkbox';
import { FormDatepickerPlugin } from './form-datepicker';
import { FormFilePlugin } from './form-file';
import { FormGroupPlugin } from './form-group';
import { FormInputPlugin } from './form-input';
import { FormRadioPlugin } from './form-radio';
import { FormSelectPlugin } from './form-select';
import { FormSpinbuttonPlugin } from './form-spinbutton';
import { FormTagsPlugin } from './form-tags';
import { FormTextareaPlugin } from './form-textarea';
import { FormTimepickerPlugin } from './form-timepicker';
import { ImagePlugin } from './image';
import { InputGroupPlugin } from './input-group';
import { JumbotronPlugin } from './jumbotron';
import { LayoutPlugin } from './layout';
import { LinkPlugin } from './link';
import { ListGroupPlugin } from './list-group';
import { MediaPlugin } from './media';
import { ModalPlugin } from './modal';
import { NavPlugin } from './nav';
import { NavbarPlugin } from './navbar';
import { PaginationPlugin } from './pagination';
import { PaginationNavPlugin } from './pagination-nav';
import { PopoverPlugin } from './popover';
import { ProgressPlugin } from './progress';
import { SpinnerPlugin } from './spinner';
// Table plugin includes TableLitePlugin and TableSimplePlugin
import { TablePlugin } from './table';
import { TabsPlugin } from './tabs';
import { TimePlugin } from './time';
import { ToastPlugin } from './toast';
import { TooltipPlugin } from './tooltip';
// Main plugin to install all component group plugins
export var componentsPlugin = /*#__PURE__*/pluginFactory({
plugins: {
AlertPlugin: AlertPlugin,
BadgePlugin: BadgePlugin,
BreadcrumbPlugin: BreadcrumbPlugin,
ButtonPlugin: ButtonPlugin,
ButtonGroupPlugin: ButtonGroupPlugin,
ButtonToolbarPlugin: ButtonToolbarPlugin,
CalendarPlugin: CalendarPlugin,
CardPlugin: CardPlugin,
CarouselPlugin: CarouselPlugin,
CollapsePlugin: CollapsePlugin,
DropdownPlugin: DropdownPlugin,
EmbedPlugin: EmbedPlugin,
FormPlugin: FormPlugin,
FormCheckboxPlugin: FormCheckboxPlugin,
FormDatepickerPlugin: FormDatepickerPlugin,
FormFilePlugin: FormFilePlugin,
FormGroupPlugin: FormGroupPlugin,
FormInputPlugin: FormInputPlugin,
FormRadioPlugin: FormRadioPlugin,
FormSelectPlugin: FormSelectPlugin,
FormSpinbuttonPlugin: FormSpinbuttonPlugin,
FormTagsPlugin: FormTagsPlugin,
FormTextareaPlugin: FormTextareaPlugin,
FormTimepickerPlugin: FormTimepickerPlugin,
ImagePlugin: ImagePlugin,
InputGroupPlugin: InputGroupPlugin,
JumbotronPlugin: JumbotronPlugin,
LayoutPlugin: LayoutPlugin,
LinkPlugin: LinkPlugin,
ListGroupPlugin: ListGroupPlugin,
MediaPlugin: MediaPlugin,
ModalPlugin: ModalPlugin,
NavPlugin: NavPlugin,
NavbarPlugin: NavbarPlugin,
PaginationPlugin: PaginationPlugin,
PaginationNavPlugin: PaginationNavPlugin,
PopoverPlugin: PopoverPlugin,
ProgressPlugin: ProgressPlugin,
SpinnerPlugin: SpinnerPlugin,
TablePlugin: TablePlugin,
TabsPlugin: TabsPlugin,
TimePlugin: TimePlugin,
ToastPlugin: ToastPlugin,
TooltipPlugin: TooltipPlugin
}
});
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import posixpath
import pathlib
import pickle
import textwrap
import tempfile
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.csv
import pyarrow.feather
import pyarrow.fs as fs
from pyarrow.tests.util import change_cwd, _filesystem_uri, FSProtocolClass
try:
import pandas as pd
except ImportError:
pd = None
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
def _generate_data(n):
import datetime
import itertools
day = datetime.datetime(2000, 1, 1)
interval = datetime.timedelta(days=5)
colors = itertools.cycle(['green', 'blue', 'yellow', 'red', 'orange'])
data = []
for i in range(n):
data.append((day, i, float(i), next(colors)))
day += interval
return pd.DataFrame(data, columns=['date', 'index', 'value', 'color'])
def _table_from_pandas(df):
schema = pa.schema([
pa.field('date', pa.date32()),
pa.field('index', pa.int64()),
pa.field('value', pa.float64()),
pa.field('color', pa.string()),
])
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
return table.replace_schema_metadata()
@pytest.fixture
@pytest.mark.parquet
def mockfs():
import pyarrow.parquet as pq
mockfs = fs._MockFileSystem()
directories = [
'subdir/1/xxx',
'subdir/2/yyy',
]
for i, directory in enumerate(directories):
path = '{}/file{}.parquet'.format(directory, i)
mockfs.create_dir(directory)
with mockfs.open_output_stream(path) as out:
data = [
list(range(5)),
list(map(float, range(5))),
list(map(str, range(5))),
[i] * 5
]
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64()),
pa.field('str', pa.string()),
pa.field('const', pa.int64()),
])
batch = pa.record_batch(data, schema=schema)
table = pa.Table.from_batches([batch])
pq.write_table(table, out)
return mockfs
@pytest.fixture
def open_logging_fs(monkeypatch):
from pyarrow.fs import PyFileSystem, LocalFileSystem
from .test_fs import ProxyHandler
localfs = LocalFileSystem()
def normalized(paths):
return {localfs.normalize_path(str(p)) for p in paths}
opened = set()
def open_input_file(self, path):
path = localfs.normalize_path(str(path))
opened.add(path)
return self._fs.open_input_file(path)
# patch proxyhandler to log calls to open_input_file
monkeypatch.setattr(ProxyHandler, "open_input_file", open_input_file)
fs = PyFileSystem(ProxyHandler(localfs))
@contextlib.contextmanager
def assert_opens(expected_opened):
opened.clear()
try:
yield
finally:
assert normalized(opened) == normalized(expected_opened)
return fs, assert_opens
@pytest.fixture(scope='module')
def multisourcefs(request):
request.config.pyarrow.requires('pandas')
request.config.pyarrow.requires('parquet')
import pyarrow.parquet as pq
df = _generate_data(1000)
mockfs = fs._MockFileSystem()
    # simply split the dataframe into four chunks to construct a data source
    # from each chunk, each in its own directory
df_a, df_b, df_c, df_d = np.array_split(df, 4)
# create a directory containing a flat sequence of parquet files without
# any partitioning involved
mockfs.create_dir('plain')
for i, chunk in enumerate(np.array_split(df_a, 10)):
path = 'plain/chunk-{}.parquet'.format(i)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with schema partitioning by weekday and color
mockfs.create_dir('schema')
for part, chunk in df_b.groupby([df_b.date.dt.dayofweek, df_b.color]):
folder = 'schema/{}/{}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by year and month
mockfs.create_dir('hive')
for part, chunk in df_c.groupby([df_c.date.dt.year, df_c.date.dt.month]):
folder = 'hive/year={}/month={}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by color
mockfs.create_dir('hive_color')
for part, chunk in df_d.groupby(["color"]):
folder = 'hive_color/color={}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
return mockfs
@pytest.fixture
def dataset(mockfs):
format = ds.ParquetFileFormat()
selector = fs.FileSelector('subdir', recursive=True)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
return factory.finish()
@pytest.fixture(params=[
(True, True),
(True, False),
(False, True),
(False, False)
], ids=['threaded-async', 'threaded-sync', 'serial-async', 'serial-sync'])
def dataset_reader(request):
'''
Fixture which allows dataset scanning operations to be
run with/without threads and with/without async
'''
use_threads, use_async = request.param
class reader:
def __init__(self):
self.use_threads = use_threads
self.use_async = use_async
def _patch_kwargs(self, kwargs):
if 'use_threads' in kwargs:
raise Exception(
('Invalid use of dataset_reader, do not specify'
' use_threads'))
if 'use_async' in kwargs:
raise Exception(
'Invalid use of dataset_reader, do not specify use_async')
kwargs['use_threads'] = use_threads
kwargs['use_async'] = use_async
def to_table(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.to_table(**kwargs)
def to_batches(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.to_batches(**kwargs)
def scanner(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.scanner(**kwargs)
def head(self, dataset, num_rows, **kwargs):
self._patch_kwargs(kwargs)
return dataset.head(num_rows, **kwargs)
def take(self, dataset, indices, **kwargs):
self._patch_kwargs(kwargs)
return dataset.take(indices, **kwargs)
def count_rows(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.count_rows(**kwargs)
return reader()
def test_filesystem_dataset(mockfs):
schema = pa.schema([
pa.field('const', pa.int64())
])
file_format = ds.ParquetFileFormat()
paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
partitions = [ds.field('part') == x for x in range(1, 3)]
fragments = [file_format.make_fragment(path, mockfs, part)
for path, part in zip(paths, partitions)]
root_partition = ds.field('level') == ds.scalar(1337)
dataset_from_fragments = ds.FileSystemDataset(
fragments, schema=schema, format=file_format,
filesystem=mockfs, root_partition=root_partition,
)
dataset_from_paths = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs,
partitions=partitions, root_partition=root_partition,
)
for dataset in [dataset_from_fragments, dataset_from_paths]:
assert isinstance(dataset, ds.FileSystemDataset)
assert isinstance(dataset.format, ds.ParquetFileFormat)
assert dataset.partition_expression.equals(root_partition)
assert set(dataset.files) == set(paths)
fragments = list(dataset.get_fragments())
for fragment, partition, path in zip(fragments, partitions, paths):
assert fragment.partition_expression.equals(partition)
assert fragment.path == path
assert isinstance(fragment.format, ds.ParquetFileFormat)
assert isinstance(fragment, ds.ParquetFileFragment)
assert fragment.row_groups == [0]
assert fragment.num_row_groups == 1
row_group_fragments = list(fragment.split_by_row_group())
assert fragment.num_row_groups == len(row_group_fragments) == 1
assert isinstance(row_group_fragments[0], ds.ParquetFileFragment)
assert row_group_fragments[0].path == path
assert row_group_fragments[0].row_groups == [0]
assert row_group_fragments[0].num_row_groups == 1
fragments = list(dataset.get_fragments(filter=ds.field("const") == 0))
assert len(fragments) == 2
# the root_partition keyword has a default
dataset = ds.FileSystemDataset(
fragments, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
# from_paths partitions have defaults
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
for fragment in dataset.get_fragments():
assert fragment.partition_expression.equals(ds.scalar(True))
# validation of required arguments
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, file_format, schema)
# validation of root_partition
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, schema=schema,
format=file_format, root_partition=1)
# missing required argument in from_paths
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset.from_paths(fragments, format=file_format)
def test_filesystem_dataset_no_filesystem_interaction(dataset_reader):
# ARROW-8283
schema = pa.schema([
pa.field('f1', pa.int64())
])
file_format = ds.IpcFileFormat()
paths = ['nonexistingfile.arrow']
# creating the dataset itself doesn't raise
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format,
filesystem=fs.LocalFileSystem(),
)
# getting fragments also doesn't raise
dataset.get_fragments()
# scanning does raise
with pytest.raises(FileNotFoundError):
dataset_reader.to_table(dataset)
def test_dataset(dataset, dataset_reader):
assert isinstance(dataset, ds.Dataset)
assert isinstance(dataset.schema, pa.Schema)
# TODO(kszucs): test non-boolean Exprs for filter do raise
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for batch in dataset_reader.to_batches(dataset):
assert isinstance(batch, pa.RecordBatch)
assert batch.column(0).equals(expected_i64)
assert batch.column(1).equals(expected_f64)
for batch in dataset_reader.scanner(dataset).scan_batches():
assert isinstance(batch, ds.TaggedRecordBatch)
assert isinstance(batch.fragment, ds.Fragment)
table = dataset_reader.to_table(dataset)
assert isinstance(table, pa.Table)
assert len(table) == 10
condition = ds.field('i64') == 1
result = dataset.to_table(use_threads=True, filter=condition).to_pydict()
# don't rely on the scanning order
assert result['i64'] == [1, 1]
assert result['f64'] == [1., 1.]
assert sorted(result['group']) == [1, 2]
assert sorted(result['key']) == ['xxx', 'yyy']
def test_scanner(dataset, dataset_reader):
scanner = dataset_reader.scanner(
dataset, memory_pool=pa.default_memory_pool())
assert isinstance(scanner, ds.Scanner)
with pytest.raises(pa.ArrowInvalid):
dataset_reader.scanner(dataset, columns=['unknown'])
scanner = dataset_reader.scanner(dataset, columns=['i64'],
memory_pool=pa.default_memory_pool())
assert scanner.dataset_schema == dataset.schema
assert scanner.projected_schema == pa.schema([("i64", pa.int64())])
assert isinstance(scanner, ds.Scanner)
table = scanner.to_table()
for batch in scanner.to_batches():
assert batch.schema == scanner.projected_schema
assert batch.num_columns == 1
assert table == scanner.to_reader().read_all()
assert table.schema == scanner.projected_schema
for i in range(table.num_rows):
indices = pa.array([i])
assert table.take(indices) == scanner.take(indices)
with pytest.raises(pa.ArrowIndexError):
scanner.take(pa.array([table.num_rows]))
assert table.num_rows == scanner.count_rows()
def test_head(dataset, dataset_reader):
result = dataset_reader.head(dataset, 0)
assert result == pa.Table.from_batches([], schema=dataset.schema)
result = dataset_reader.head(dataset, 1, columns=['i64']).to_pydict()
assert result == {'i64': [0]}
result = dataset_reader.head(dataset, 2, columns=['i64'],
filter=ds.field('i64') > 1).to_pydict()
assert result == {'i64': [2, 3]}
result = dataset_reader.head(dataset, 1024, columns=['i64']).to_pydict()
assert result == {'i64': list(range(5)) * 2}
fragment = next(dataset.get_fragments())
result = fragment.head(1, columns=['i64']).to_pydict()
assert result == {'i64': [0]}
result = fragment.head(1024, columns=['i64']).to_pydict()
assert result == {'i64': list(range(5))}
def test_take(dataset, dataset_reader):
fragment = next(dataset.get_fragments())
indices = pa.array([1, 3])
assert dataset_reader.take(
fragment, indices) == dataset_reader.to_table(fragment).take(indices)
with pytest.raises(IndexError):
dataset_reader.take(fragment, pa.array([5]))
indices = pa.array([1, 7])
assert dataset_reader.take(
dataset, indices) == dataset_reader.to_table(dataset).take(indices)
with pytest.raises(IndexError):
dataset_reader.take(dataset, pa.array([10]))
def test_count_rows(dataset, dataset_reader):
fragment = next(dataset.get_fragments())
assert dataset_reader.count_rows(fragment) == 5
assert dataset_reader.count_rows(
fragment, filter=ds.field("i64") == 4) == 1
assert dataset_reader.count_rows(dataset) == 10
# Filter on partition key
assert dataset_reader.count_rows(
dataset, filter=ds.field("group") == 1) == 5
# Filter on data
assert dataset_reader.count_rows(dataset, filter=ds.field("i64") >= 3) == 4
assert dataset_reader.count_rows(dataset, filter=ds.field("i64") < 0) == 0
def test_abstract_classes():
classes = [
ds.FileFormat,
ds.Scanner,
ds.Partitioning,
]
for klass in classes:
with pytest.raises(TypeError):
klass()
def test_partitioning():
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
for klass in [ds.DirectoryPartitioning, ds.HivePartitioning]:
partitioning = klass(schema)
assert isinstance(partitioning, ds.Partitioning)
partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.float64())
])
)
assert partitioning.dictionaries is None
expr = partitioning.parse('/3/3.14')
assert isinstance(expr, ds.Expression)
expected = (ds.field('group') == 3) & (ds.field('key') == 3.14)
assert expr.equals(expected)
with pytest.raises(pa.ArrowInvalid):
partitioning.parse('/prefix/3/aaa')
expr = partitioning.parse('/3')
expected = ds.field('group') == 3
assert expr.equals(expected)
partitioning = ds.HivePartitioning(
pa.schema([
pa.field('alpha', pa.int64()),
pa.field('beta', pa.int64())
]),
null_fallback='xyz'
)
assert partitioning.dictionaries is None
expr = partitioning.parse('/alpha=0/beta=3')
expected = (
(ds.field('alpha') == ds.scalar(0)) &
(ds.field('beta') == ds.scalar(3))
)
assert expr.equals(expected)
expr = partitioning.parse('/alpha=xyz/beta=3')
expected = (
(ds.field('alpha').is_null() & (ds.field('beta') == ds.scalar(3)))
)
assert expr.equals(expected)
for shouldfail in ['/alpha=one/beta=2', '/alpha=one', '/beta=two']:
with pytest.raises(pa.ArrowInvalid):
partitioning.parse(shouldfail)
def test_expression_serialization():
a = ds.scalar(1)
b = ds.scalar(1.1)
c = ds.scalar(True)
d = ds.scalar("string")
e = ds.scalar(None)
f = ds.scalar({'a': 1})
g = ds.scalar(pa.scalar(1))
all_exprs = [a, b, c, d, e, f, g, a == b, a > b, a & b, a | b, ~c,
d.is_valid(), a.cast(pa.int32(), safe=False),
a.cast(pa.int32(), safe=False), a.isin([1, 2, 3]),
ds.field('i64') > 5, ds.field('i64') == 5,
ds.field('i64') == 7, ds.field('i64').is_null()]
for expr in all_exprs:
assert isinstance(expr, ds.Expression)
restored = pickle.loads(pickle.dumps(expr))
assert expr.equals(restored)
def test_expression_construction():
zero = ds.scalar(0)
one = ds.scalar(1)
true = ds.scalar(True)
false = ds.scalar(False)
string = ds.scalar("string")
field = ds.field("field")
zero | one == string
~true == false
for typ in ("bool", pa.bool_()):
field.cast(typ) == true
field.isin([1, 2])
with pytest.raises(TypeError):
field.isin(1)
with pytest.raises(pa.ArrowInvalid):
field != object()
def test_expression_boolean_operators():
# https://issues.apache.org/jira/browse/ARROW-11412
true = ds.scalar(True)
false = ds.scalar(False)
with pytest.raises(ValueError, match="cannot be evaluated to python True"):
true and false
with pytest.raises(ValueError, match="cannot be evaluated to python True"):
true or false
with pytest.raises(ValueError, match="cannot be evaluated to python True"):
bool(true)
with pytest.raises(ValueError, match="cannot be evaluated to python True"):
not true
def test_expression_arithmetic_operators():
dataset = ds.dataset(pa.table({'a': [1, 2, 3], 'b': [2, 2, 2]}))
a = ds.field("a")
b = ds.field("b")
result = dataset.to_table(columns={
"a+1": a + 1,
"b-a": b - a,
"a*2": a * 2,
"a/b": a.cast("float64") / b,
})
expected = pa.table({
"a+1": [2, 3, 4], "b-a": [1, 0, -1],
"a*2": [2, 4, 6], "a/b": [0.5, 1.0, 1.5],
})
assert result.equals(expected)
def test_partition_keys():
a, b, c = [ds.field(f) == f for f in 'abc']
assert ds._get_partition_keys(a) == {'a': 'a'}
assert ds._get_partition_keys(a & b & c) == {f: f for f in 'abc'}
nope = ds.field('d') >= 3
assert ds._get_partition_keys(nope) == {}
assert ds._get_partition_keys(a & nope) == {'a': 'a'}
null = ds.field('a').is_null()
assert ds._get_partition_keys(null) == {'a': None}
def test_parquet_read_options():
opts1 = ds.ParquetReadOptions()
opts2 = ds.ParquetReadOptions(dictionary_columns=['a', 'b'])
opts3 = ds.ParquetReadOptions(coerce_int96_timestamp_unit="ms")
assert opts1.dictionary_columns == set()
assert opts2.dictionary_columns == {'a', 'b'}
assert opts1.coerce_int96_timestamp_unit == "ns"
assert opts3.coerce_int96_timestamp_unit == "ms"
assert opts1 == opts1
assert opts1 != opts2
assert opts1 != opts3
def test_parquet_file_format_read_options():
pff1 = ds.ParquetFileFormat()
pff2 = ds.ParquetFileFormat(dictionary_columns={'a'})
pff3 = ds.ParquetFileFormat(coerce_int96_timestamp_unit="s")
assert pff1.read_options == ds.ParquetReadOptions()
assert pff2.read_options == ds.ParquetReadOptions(dictionary_columns=['a'])
assert pff3.read_options == ds.ParquetReadOptions(
coerce_int96_timestamp_unit="s")
def test_parquet_scan_options():
opts1 = ds.ParquetFragmentScanOptions()
opts2 = ds.ParquetFragmentScanOptions(buffer_size=4096)
opts3 = ds.ParquetFragmentScanOptions(
buffer_size=2**13, use_buffered_stream=True)
opts4 = ds.ParquetFragmentScanOptions(buffer_size=2**13, pre_buffer=True)
assert opts1.use_buffered_stream is False
assert opts1.buffer_size == 2**13
assert opts1.pre_buffer is False
assert opts2.use_buffered_stream is False
assert opts2.buffer_size == 2**12
assert opts2.pre_buffer is False
assert opts3.use_buffered_stream is True
assert opts3.buffer_size == 2**13
assert opts3.pre_buffer is False
assert opts4.use_buffered_stream is False
assert opts4.buffer_size == 2**13
assert opts4.pre_buffer is True
assert opts1 == opts1
assert opts1 != opts2
assert opts2 != opts3
assert opts3 != opts4
def test_file_format_pickling():
formats = [
ds.IpcFileFormat(),
ds.CsvFileFormat(),
ds.CsvFileFormat(pa.csv.ParseOptions(delimiter='\t',
ignore_empty_lines=True)),
ds.CsvFileFormat(read_options=pa.csv.ReadOptions(
skip_rows=3, column_names=['foo'])),
ds.CsvFileFormat(read_options=pa.csv.ReadOptions(
skip_rows=3, block_size=2**20)),
ds.ParquetFileFormat(),
ds.ParquetFileFormat(dictionary_columns={'a'}),
ds.ParquetFileFormat(use_buffered_stream=True),
ds.ParquetFileFormat(
use_buffered_stream=True,
buffer_size=4096,
)
]
try:
formats.append(ds.OrcFileFormat())
except (ImportError, AttributeError):
# catch AttributeError for Python 3.6
pass
for file_format in formats:
assert pickle.loads(pickle.dumps(file_format)) == file_format
def test_fragment_scan_options_pickling():
options = [
ds.CsvFragmentScanOptions(),
ds.CsvFragmentScanOptions(
convert_options=pa.csv.ConvertOptions(strings_can_be_null=True)),
ds.CsvFragmentScanOptions(
read_options=pa.csv.ReadOptions(block_size=2**16)),
ds.ParquetFragmentScanOptions(buffer_size=4096),
ds.ParquetFragmentScanOptions(pre_buffer=True),
]
for option in options:
assert pickle.loads(pickle.dumps(option)) == option
@pytest.mark.parametrize('paths_or_selector', [
fs.FileSelector('subdir', recursive=True),
[
'subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet',
]
])
@pytest.mark.parametrize('pre_buffer', [False, True])
def test_filesystem_factory(mockfs, paths_or_selector, pre_buffer):
format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(dictionary_columns={"str"}),
pre_buffer=pre_buffer
)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
assert options.partition_base_dir == 'subdir'
assert options.selector_ignore_prefixes == ['.', '_']
assert options.exclude_invalid_files is False
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
assert factory.inspect().equals(pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64()),
pa.field('str', pa.dictionary(pa.int32(), pa.string())),
pa.field('const', pa.int64()),
pa.field('group', pa.int32()),
pa.field('key', pa.string()),
]), check_metadata=False)
assert isinstance(factory.inspect_schemas(), list)
assert isinstance(factory.finish(inspected_schema),
ds.FileSystemDataset)
assert factory.root_partition.equals(ds.scalar(True))
dataset = factory.finish()
assert isinstance(dataset, ds.FileSystemDataset)
scanner = dataset.scanner()
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
expected_str = pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, 3, 4], type=pa.int32()),
pa.array("0 1 2 3 4".split(), type=pa.string())
)
iterator = scanner.scan_batches()
for (batch, fragment), group, key in zip(iterator, [1, 2], ['xxx', 'yyy']):
expected_group = pa.array([group] * 5, type=pa.int32())
expected_key = pa.array([key] * 5, type=pa.string())
expected_const = pa.array([group - 1] * 5, type=pa.int64())
# Can't compare or really introspect expressions from Python
assert fragment.partition_expression is not None
assert batch.num_columns == 6
assert batch[0].equals(expected_i64)
assert batch[1].equals(expected_f64)
assert batch[2].equals(expected_str)
assert batch[3].equals(expected_const)
assert batch[4].equals(expected_group)
assert batch[5].equals(expected_key)
table = dataset.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
assert table.num_columns == 6
def test_make_fragment(multisourcefs):
parquet_format = ds.ParquetFileFormat()
dataset = ds.dataset('/plain', filesystem=multisourcefs,
format=parquet_format)
for path in dataset.files:
fragment = parquet_format.make_fragment(path, multisourcefs)
assert fragment.row_groups == [0]
row_group_fragment = parquet_format.make_fragment(path, multisourcefs,
row_groups=[0])
for f in [fragment, row_group_fragment]:
assert isinstance(f, ds.ParquetFileFragment)
assert f.path == path
assert isinstance(f.filesystem, type(multisourcefs))
assert row_group_fragment.row_groups == [0]
def test_make_csv_fragment_from_buffer(dataset_reader):
content = textwrap.dedent("""
alpha,num,animal
a,12,dog
b,11,cat
c,10,rabbit
""")
buffer = pa.py_buffer(content.encode('utf-8'))
csv_format = ds.CsvFileFormat()
fragment = csv_format.make_fragment(buffer)
expected = pa.table([['a', 'b', 'c'],
[12, 11, 10],
['dog', 'cat', 'rabbit']],
names=['alpha', 'num', 'animal'])
assert dataset_reader.to_table(fragment).equals(expected)
pickled = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(pickled).equals(fragment.to_table())
@pytest.mark.parquet
def test_make_parquet_fragment_from_buffer(dataset_reader):
import pyarrow.parquet as pq
arrays = [
pa.array(['a', 'b', 'c']),
pa.array([12, 11, 10]),
pa.array(['dog', 'cat', 'rabbit'])
]
dictionary_arrays = [
arrays[0].dictionary_encode(),
arrays[1],
arrays[2].dictionary_encode()
]
dictionary_format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(
dictionary_columns=['alpha', 'animal']
),
use_buffered_stream=True,
buffer_size=4096,
)
cases = [
(arrays, ds.ParquetFileFormat()),
(dictionary_arrays, dictionary_format)
]
for arrays, format_ in cases:
table = pa.table(arrays, names=['alpha', 'num', 'animal'])
out = pa.BufferOutputStream()
pq.write_table(table, out)
buffer = out.getvalue()
fragment = format_.make_fragment(buffer)
assert dataset_reader.to_table(fragment).equals(table)
pickled = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(pickled).equals(table)
def _create_dataset_for_fragments(tempdir, chunk_size=None, filesystem=None):
import pyarrow.parquet as pq
table = pa.table(
[range(8), [1] * 8, ['a'] * 4 + ['b'] * 4],
names=['f1', 'f2', 'part']
)
path = str(tempdir / "test_parquet_dataset")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path,
partition_cols=["part"], chunk_size=chunk_size)
dataset = ds.dataset(
path, format="parquet", partitioning="hive", filesystem=filesystem
)
return table, dataset
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir)
# list fragments
fragments = list(dataset.get_fragments())
assert len(fragments) == 2
f = fragments[0]
physical_names = ['f1', 'f2']
# file's schema does not include partition column
assert f.physical_schema.names == physical_names
assert f.format.inspect(f.path, f.filesystem) == f.physical_schema
assert f.partition_expression.equals(ds.field('part') == 'a')
# By default, the partition column is not part of the schema.
result = dataset_reader.to_table(f)
assert result.column_names == physical_names
assert result.equals(table.remove_column(2).slice(0, 4))
# scanning fragment includes partition columns when given the proper
# schema.
result = dataset_reader.to_table(f, schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert result.equals(table.slice(0, 4))
assert f.physical_schema == result.schema.remove(2)
# scanning fragments follow filter predicate
result = dataset_reader.to_table(
f, schema=dataset.schema, filter=ds.field('f1') < 2)
assert result.column_names == ['f1', 'f2', 'part']
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_implicit_cast(tempdir):
# ARROW-8693
import pyarrow.parquet as pq
table = pa.table([range(8), [1] * 4 + [2] * 4], names=['col', 'part'])
path = str(tempdir / "test_parquet_dataset")
pq.write_to_dataset(table, path, partition_cols=["part"])
part = ds.partitioning(pa.schema([('part', 'int8')]), flavor="hive")
dataset = ds.dataset(path, format="parquet", partitioning=part)
fragments = dataset.get_fragments(filter=ds.field("part") >= 2)
assert len(list(fragments)) == 1
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_reconstruct(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir)
def assert_yields_projected(fragment, row_slice,
columns=None, filter=None):
actual = fragment.to_table(
schema=table.schema, columns=columns, filter=filter)
column_names = columns if columns else table.column_names
assert actual.column_names == column_names
expected = table.slice(*row_slice).select(column_names)
assert actual.equals(expected)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(
pickled_fragment) == dataset_reader.to_table(fragment)
# manually re-construct a fragment, with explicit schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert dataset_reader.to_table(new_fragment).equals(
dataset_reader.to_table(fragment))
assert_yields_projected(new_fragment, (0, 4))
# filter / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2), filter=ds.field('f1') < 2)
# filter requiring cast / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2),
columns=['f1'], filter=ds.field('f1') < 2.0)
# filter on the partition column
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 4),
filter=ds.field('part') == 'a')
# Fragments don't contain the partition's columns if not provided to the
# `to_table(schema=...)` method.
pattern = (r'No match for FieldRef.Name\(part\) in ' +
fragment.physical_schema.to_string(False, False, False))
with pytest.raises(ValueError, match=pattern):
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
dataset_reader.to_table(new_fragment, filter=ds.field('part') == 'a')
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert len(row_group_fragments) == fragment.num_row_groups == 2
result = dataset_reader.to_table(
row_group_fragments[0], schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert len(result) == 2
assert result.equals(table.slice(0, 2))
assert row_group_fragments[0].row_groups is not None
assert row_group_fragments[0].num_row_groups == 1
assert row_group_fragments[0].row_groups[0].statistics == {
'f1': {'min': 0, 'max': 1},
'f2': {'min': 1, 'max': 1},
}
fragment = list(dataset.get_fragments(filter=ds.field('f1') < 1))[0]
row_group_fragments = list(fragment.split_by_row_group(ds.field('f1') < 1))
assert len(row_group_fragments) == 1
result = dataset_reader.to_table(
row_group_fragments[0], filter=ds.field('f1') < 1)
assert len(result) == 1
@pytest.mark.parquet
def test_fragments_parquet_num_row_groups(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': range(8)})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
original_fragment = list(dataset.get_fragments())[0]
# create fragment with subset of row groups
fragment = original_fragment.format.make_fragment(
original_fragment.path, original_fragment.filesystem,
row_groups=[1, 3])
assert fragment.num_row_groups == 2
# ensure that parsing metadata preserves correct number of row groups
fragment.ensure_complete_metadata()
assert fragment.num_row_groups == 2
assert len(fragment.row_groups) == 2
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_dictionary(tempdir, dataset_reader):
import pandas as pd
df = pd.DataFrame(dict(col1=['a', 'b'], col2=[1, 2]))
df['col1'] = df['col1'].astype("category")
import pyarrow.parquet as pq
pq.write_table(pa.table(df), tempdir / "test_filter_dictionary.parquet")
import pyarrow.dataset as ds
dataset = ds.dataset(tempdir / 'test_filter_dictionary.parquet')
result = dataset_reader.to_table(dataset, filter=ds.field("col1") == "a")
assert (df.iloc[0] == result.to_pandas()).all().all()
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_ensure_metadata(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
_, dataset = _create_dataset_for_fragments(
tempdir, chunk_size=2, filesystem=fs
)
fragment = list(dataset.get_fragments())[0]
# with default discovery, no metadata loaded
with assert_opens([fragment.path]):
fragment.ensure_complete_metadata()
assert fragment.row_groups == [0, 1]
# second time -> use cached / no file IO
with assert_opens([]):
fragment.ensure_complete_metadata()
# recreate fragment with row group ids
new_fragment = fragment.format.make_fragment(
fragment.path, fragment.filesystem, row_groups=[0, 1]
)
assert new_fragment.row_groups == fragment.row_groups
# collect metadata
new_fragment.ensure_complete_metadata()
row_group = new_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.num_rows == 2
assert row_group.statistics is not None
# pickling preserves row group ids
pickled_fragment = pickle.loads(pickle.dumps(new_fragment))
with assert_opens([fragment.path]):
assert pickled_fragment.row_groups == [0, 1]
row_group = pickled_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.statistics is not None
def _create_dataset_all_types(tempdir, chunk_size=None):
import pyarrow.parquet as pq
table = pa.table(
[
pa.array([True, None, False], pa.bool_()),
pa.array([1, 10, 42], pa.int8()),
pa.array([1, 10, 42], pa.uint8()),
pa.array([1, 10, 42], pa.int16()),
pa.array([1, 10, 42], pa.uint16()),
pa.array([1, 10, 42], pa.int32()),
pa.array([1, 10, 42], pa.uint32()),
pa.array([1, 10, 42], pa.int64()),
pa.array([1, 10, 42], pa.uint64()),
pa.array([1.0, 10.0, 42.0], pa.float32()),
pa.array([1.0, 10.0, 42.0], pa.float64()),
pa.array(['a', None, 'z'], pa.utf8()),
pa.array(['a', None, 'z'], pa.binary()),
pa.array([1, 10, 42], pa.timestamp('s')),
pa.array([1, 10, 42], pa.timestamp('ms')),
pa.array([1, 10, 42], pa.timestamp('us')),
pa.array([1, 10, 42], pa.date32()),
pa.array([1, 10, 4200000000], pa.date64()),
pa.array([1, 10, 42], pa.time32('s')),
pa.array([1, 10, 42], pa.time64('us')),
],
names=[
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'int64',
'uint64',
'float',
'double',
'utf8',
'binary',
'ts[s]',
'ts[ms]',
'ts[us]',
'date32',
'date64',
'time32',
'time64',
]
)
path = str(tempdir / "test_parquet_dataset_all_types")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path, chunk_size=chunk_size)
return table, ds.dataset(path, format="parquet", partitioning="hive")
@pytest.mark.pandas
@pytest.mark.parquet
def test_parquet_fragment_statistics(tempdir):
table, dataset = _create_dataset_all_types(tempdir)
fragment = list(dataset.get_fragments())[0]
import datetime
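    # helpers producing the expected datetime statistics: inputs are seconds,
    # milliseconds and microseconds since the epoch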
def dt_s(x): return datetime.datetime(1970, 1, 1, 0, 0, x)
def dt_ms(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x*1000)
def dt_us(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x)
date = datetime.date
time = datetime.time
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert row_group_fragments[0].row_groups is not None
row_group = row_group_fragments[0].row_groups[0]
assert row_group.num_rows == 3
assert row_group.total_byte_size > 1000
assert row_group.statistics == {
'boolean': {'min': False, 'max': True},
'int8': {'min': 1, 'max': 42},
'uint8': {'min': 1, 'max': 42},
'int16': {'min': 1, 'max': 42},
'uint16': {'min': 1, 'max': 42},
'int32': {'min': 1, 'max': 42},
'uint32': {'min': 1, 'max': 42},
'int64': {'min': 1, 'max': 42},
'uint64': {'min': 1, 'max': 42},
'float': {'min': 1.0, 'max': 42.0},
'double': {'min': 1.0, 'max': 42.0},
'utf8': {'min': 'a', 'max': 'z'},
'binary': {'min': b'a', 'max': b'z'},
'ts[s]': {'min': dt_s(1), 'max': dt_s(42)},
'ts[ms]': {'min': dt_ms(1), 'max': dt_ms(42)},
'ts[us]': {'min': dt_us(1), 'max': dt_us(42)},
'date32': {'min': date(1970, 1, 2), 'max': date(1970, 2, 12)},
'date64': {'min': date(1970, 1, 1), 'max': date(1970, 2, 18)},
'time32': {'min': time(0, 0, 1), 'max': time(0, 0, 42)},
'time64': {'min': time(0, 0, 0, 1), 'max': time(0, 0, 0, 42)},
}
@pytest.mark.parquet
def test_parquet_fragment_statistics_nulls(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': [0, 1, None, None], 'b': ['a', 'b', None, None]})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
fragments = list(dataset.get_fragments())[0].split_by_row_group()
# second row group has all nulls -> no statistics
assert fragments[1].row_groups[0].statistics == {}
@pytest.mark.pandas
@pytest.mark.parquet
def test_parquet_empty_row_group_statistics(tempdir):
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
df.to_parquet(tempdir / "test.parquet", engine="pyarrow")
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
fragments = list(dataset.get_fragments())[0].split_by_row_group()
    # the only row group is empty -> no statistics
assert fragments[0].row_groups[0].statistics == {}
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_predicate(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
assert fragment.partition_expression.equals(ds.field('part') == 'a')
# predicate may reference a partition field not present in the
# physical_schema if an explicit schema is provided to split_by_row_group
# filter matches partition_expression: all row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'a',
schema=dataset.schema))
assert len(row_group_fragments) == 2
# filter contradicts partition_expression: no row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'b',
schema=dataset.schema))
assert len(row_group_fragments) == 0
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_reconstruct(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
row_group_fragments = list(fragment.split_by_row_group())
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(
pickled_fragment) == dataset_reader.to_table(fragment)
# manually re-construct row group fragments
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups=[0])
result = dataset_reader.to_table(new_fragment)
assert result.equals(dataset_reader.to_table(row_group_fragments[0]))
# manually re-construct a row group fragment with filter/column projection
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={1})
result = dataset_reader.to_table(
new_fragment, schema=table.schema, columns=['f1', 'part'],
filter=ds.field('f1') < 3, )
assert result.column_names == ['f1', 'part']
assert len(result) == 1
# out of bounds row group index
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={2})
with pytest.raises(IndexError, match="references row group 2"):
dataset_reader.to_table(new_fragment)
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_ids(tempdir, open_logging_fs,
dataset_reader):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with row group ids
subfrag = fragment.subset(row_group_ids=[0, 3])
with assert_opens([]):
assert subfrag.num_row_groups == 2
assert subfrag.row_groups == [0, 3]
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = dataset_reader.to_table(subfrag)
assert result.to_pydict() == {"f1": [0, 3], "f2": [1, 1]}
# empty list of ids
subfrag = fragment.subset(row_group_ids=[])
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = dataset_reader.to_table(subfrag, schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_filter(tempdir, open_logging_fs,
dataset_reader):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with filter
subfrag = fragment.subset(ds.field("f1") >= 1)
with assert_opens([]):
assert subfrag.num_row_groups == 3
assert len(subfrag.row_groups) == 3
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = dataset_reader.to_table(subfrag)
assert result.to_pydict() == {"f1": [1, 2, 3], "f2": [1, 1, 1]}
# filter that results in empty selection
subfrag = fragment.subset(ds.field("f1") > 5)
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = dataset_reader.to_table(subfrag, schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
# passing schema to ensure filter on partition expression works
subfrag = fragment.subset(ds.field("part") == "a", schema=dataset.schema)
assert subfrag.num_row_groups == 4
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_invalid(tempdir):
_, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1)
fragment = list(dataset.get_fragments())[0]
# passing none or both of filter / row_group_ids
with pytest.raises(ValueError):
fragment.subset(ds.field("f1") >= 1, row_group_ids=[1, 2])
with pytest.raises(ValueError):
fragment.subset()
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_repr(tempdir, dataset):
# partitioned parquet dataset
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.ParquetFileFragment path=subdir/1/xxx/file0.parquet "
"partition=[key=xxx, group=1]>"
)
# single-file parquet dataset (no partition information in repr)
table, path = _create_single_file(tempdir)
dataset = ds.dataset(path, format="parquet")
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.ParquetFileFragment path={}>".format(
dataset.filesystem.normalize_path(str(path)))
)
# non-parquet format
path = tempdir / "data.feather"
pa.feather.write_feather(table, path)
dataset = ds.dataset(path, format="feather")
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.FileFragment type=ipc path={}>".format(
dataset.filesystem.normalize_path(str(path)))
)
def test_partitioning_factory(mockfs):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
partitioning_factory = ds.DirectoryPartitioning.discover(['group', 'key'])
assert isinstance(partitioning_factory, ds.PartitioningFactory)
options.partitioning_factory = partitioning_factory
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
# i64/f64 from data, group/key from "/1/xxx" and "/2/yyy" paths
expected_schema = pa.schema([
("i64", pa.int64()),
("f64", pa.float64()),
("str", pa.string()),
("const", pa.int64()),
("group", pa.int32()),
("key", pa.string()),
])
assert inspected_schema.equals(expected_schema)
hive_partitioning_factory = ds.HivePartitioning.discover()
assert isinstance(hive_partitioning_factory, ds.PartitioningFactory)
@pytest.mark.parametrize('infer_dictionary', [False, True])
def test_partitioning_factory_dictionary(mockfs, infer_dictionary):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning_factory = ds.DirectoryPartitioning.discover(
['group', 'key'], infer_dictionary=infer_dictionary)
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options)
inferred_schema = factory.inspect()
if infer_dictionary:
expected_type = pa.dictionary(pa.int32(), pa.string())
assert inferred_schema.field('key').type == expected_type
table = factory.finish().to_table().combine_chunks()
actual = table.column('key').chunk(0)
expected = pa.array(['xxx'] * 5 + ['yyy'] * 5).dictionary_encode()
assert actual.equals(expected)
# ARROW-9345 ensure filtering on the partition field works
table = factory.finish().to_table(filter=ds.field('key') == 'xxx')
actual = table.column('key').chunk(0)
expected = expected.slice(0, 5)
assert actual.equals(expected)
else:
assert inferred_schema.field('key').type == pa.string()
def test_partitioning_factory_segment_encoding():
mockfs = fs._MockFileSystem()
format = ds.IpcFileFormat()
schema = pa.schema([("i64", pa.int64())])
table = pa.table([pa.array(range(10))], schema=schema)
partition_schema = pa.schema(
[("date", pa.timestamp("s")), ("string", pa.string())])
string_partition_schema = pa.schema(
[("date", pa.string()), ("string", pa.string())])
full_schema = pa.schema(list(schema) + list(partition_schema))
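    # write the same data under both a directory-style and a hive-style
    # layout, each with percent-encoded partition segments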
for directory in [
"directory/2021-05-04 00%3A00%3A00/%24",
"hive/date=2021-05-04 00%3A00%3A00/string=%24",
]:
mockfs.create_dir(directory)
with mockfs.open_output_stream(directory + "/0.feather") as sink:
with pa.ipc.new_file(sink, schema) as writer:
writer.write_table(table)
writer.close()
# Directory
selector = fs.FileSelector("directory", recursive=True)
options = ds.FileSystemFactoryOptions("directory")
options.partitioning_factory = ds.DirectoryPartitioning.discover(
schema=partition_schema)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
inferred_schema = factory.inspect()
assert inferred_schema == full_schema
actual = factory.finish().to_table(columns={
"date_int": ds.field("date").cast(pa.int64()),
})
assert actual[0][0].as_py() == 1620086400
options.partitioning_factory = ds.DirectoryPartitioning.discover(
["date", "string"], segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning = ds.DirectoryPartitioning(
string_partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning_factory = ds.DirectoryPartitioning.discover(
schema=partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
with pytest.raises(pa.ArrowInvalid,
match="Could not cast segments for partition field"):
inferred_schema = factory.inspect()
# Hive
selector = fs.FileSelector("hive", recursive=True)
options = ds.FileSystemFactoryOptions("hive")
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
inferred_schema = factory.inspect()
assert inferred_schema == full_schema
actual = factory.finish().to_table(columns={
"date_int": ds.field("date").cast(pa.int64()),
})
assert actual[0][0].as_py() == 1620086400
options.partitioning_factory = ds.HivePartitioning.discover(
segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning = ds.HivePartitioning(
string_partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
with pytest.raises(pa.ArrowInvalid,
match="Could not cast segments for partition field"):
inferred_schema = factory.inspect()
def test_dictionary_partitioning_outer_nulls_raises(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
part = ds.partitioning(
pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())]))
with pytest.raises(pa.ArrowInvalid):
ds.write_dataset(table, tempdir, format='parquet', partitioning=part)
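# Helpers to enumerate the leaf partition directories produced by
# ds.write_dataset, used to check how null partition values are written.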
def _has_subdirs(basedir):
elements = os.listdir(basedir)
return any([os.path.isdir(os.path.join(basedir, el)) for el in elements])
def _do_list_all_dirs(basedir, path_so_far, result):
for f in os.listdir(basedir):
true_nested = os.path.join(basedir, f)
if os.path.isdir(true_nested):
norm_nested = posixpath.join(path_so_far, f)
if _has_subdirs(true_nested):
_do_list_all_dirs(true_nested, norm_nested, result)
else:
result.append(norm_nested)
def _list_all_dirs(basedir):
result = []
_do_list_all_dirs(basedir, '', result)
return result
def _check_dataset_directories(tempdir, expected_directories):
actual_directories = set(_list_all_dirs(tempdir))
assert actual_directories == set(expected_directories)
def test_dictionary_partitioning_inner_nulls(tempdir):
table = pa.table({'a': ['x', 'y', 'z'], 'b': ['x', 'y', None]})
part = ds.partitioning(
pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())]))
ds.write_dataset(table, tempdir, format='parquet', partitioning=part)
_check_dataset_directories(tempdir, ['x/x', 'y/y', 'z'])
def test_hive_partitioning_nulls(tempdir):
table = pa.table({'a': ['x', None, 'z'], 'b': ['x', 'y', None]})
part = ds.HivePartitioning(pa.schema(
[pa.field('a', pa.string()), pa.field('b', pa.string())]), None, 'xyz')
ds.write_dataset(table, tempdir, format='parquet', partitioning=part)
_check_dataset_directories(tempdir, ['a=x/b=x', 'a=xyz/b=y', 'a=z/b=xyz'])
def test_partitioning_function():
schema = pa.schema([("year", pa.int16()), ("month", pa.int8())])
names = ["year", "month"]
# default DirectoryPartitioning
part = ds.partitioning(schema)
assert isinstance(part, ds.DirectoryPartitioning)
part = ds.partitioning(schema, dictionaries="infer")
assert isinstance(part, ds.PartitioningFactory)
part = ds.partitioning(field_names=names)
assert isinstance(part, ds.PartitioningFactory)
# needs schema or list of names
with pytest.raises(ValueError):
ds.partitioning()
with pytest.raises(ValueError, match="Expected list"):
ds.partitioning(field_names=schema)
with pytest.raises(ValueError, match="Cannot specify both"):
ds.partitioning(schema, field_names=schema)
# Hive partitioning
part = ds.partitioning(schema, flavor="hive")
assert isinstance(part, ds.HivePartitioning)
part = ds.partitioning(schema, dictionaries="infer", flavor="hive")
assert isinstance(part, ds.PartitioningFactory)
part = ds.partitioning(flavor="hive")
assert isinstance(part, ds.PartitioningFactory)
# cannot pass list of names
with pytest.raises(ValueError):
ds.partitioning(names, flavor="hive")
with pytest.raises(ValueError, match="Cannot specify 'field_names'"):
ds.partitioning(field_names=names, flavor="hive")
# unsupported flavor
with pytest.raises(ValueError):
ds.partitioning(schema, flavor="unsupported")
def test_directory_partitioning_dictionary_key(mockfs):
# ARROW-8088 specifying partition key as dictionary type
schema = pa.schema([
pa.field('group', pa.dictionary(pa.int8(), pa.int32())),
pa.field('key', pa.dictionary(pa.int8(), pa.string()))
])
part = ds.DirectoryPartitioning.discover(schema=schema)
dataset = ds.dataset(
"subdir", format="parquet", filesystem=mockfs, partitioning=part
)
assert dataset.partitioning.schema == schema
table = dataset.to_table()
assert table.column('group').type.equals(schema.types[0])
assert table.column('group').to_pylist() == [1] * 5 + [2] * 5
assert table.column('key').type.equals(schema.types[1])
assert table.column('key').to_pylist() == ['xxx'] * 5 + ['yyy'] * 5
def test_hive_partitioning_dictionary_key(multisourcefs):
# ARROW-8088 specifying partition key as dictionary type
schema = pa.schema([
pa.field('year', pa.dictionary(pa.int8(), pa.int16())),
pa.field('month', pa.dictionary(pa.int8(), pa.int16()))
])
part = ds.HivePartitioning.discover(schema=schema)
dataset = ds.dataset(
"hive", format="parquet", filesystem=multisourcefs, partitioning=part
)
assert dataset.partitioning.schema == schema
table = dataset.to_table()
year_dictionary = list(range(2006, 2011))
month_dictionary = list(range(1, 13))
assert table.column('year').type.equals(schema.types[0])
for chunk in table.column('year').chunks:
actual = chunk.dictionary.to_pylist()
actual.sort()
assert actual == year_dictionary
assert table.column('month').type.equals(schema.types[1])
for chunk in table.column('month').chunks:
actual = chunk.dictionary.to_pylist()
actual.sort()
assert actual == month_dictionary
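# Helpers writing small parquet files used by the dataset-construction and
# open_dataset tests below.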
def _create_single_file(base_dir, table=None, row_group_size=None):
import pyarrow.parquet as pq
if table is None:
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = base_dir / "test.parquet"
pq.write_table(table, path, row_group_size=row_group_size)
return table, path
def _create_directory_of_files(base_dir):
import pyarrow.parquet as pq
table1 = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path1 = base_dir / "test1.parquet"
pq.write_table(table1, path1)
table2 = pa.table({'a': range(9, 18), 'b': [0.] * 4 + [1.] * 5})
path2 = base_dir / "test2.parquet"
pq.write_table(table2, path2)
return (table1, table2), (path1, path2)
def _check_dataset(dataset, table, dataset_reader):
# also test that pickle roundtrip keeps the functionality
for d in [dataset, pickle.loads(pickle.dumps(dataset))]:
        assert d.schema.equals(table.schema)
        assert dataset_reader.to_table(d).equals(table)
def _check_dataset_from_path(path, table, dataset_reader, **kwargs):
# pathlib object
assert isinstance(path, pathlib.Path)
# accept Path, str, List[Path], List[str]
for p in [path, str(path), [path], [str(path)]]:
        dataset = ds.dataset(p, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table, dataset_reader)
# relative string path
with change_cwd(path.parent):
dataset = ds.dataset(path.name, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_single_file(tempdir, dataset_reader):
table, path = _create_single_file(tempdir)
_check_dataset_from_path(path, table, dataset_reader)
@pytest.mark.parquet
def test_deterministic_row_order(tempdir, dataset_reader):
# ARROW-8447 Ensure that dataset.to_table (and Scanner::ToTable) returns a
# deterministic row ordering. This is achieved by constructing a single
# parquet file with one row per RowGroup.
table, path = _create_single_file(tempdir, row_group_size=1)
_check_dataset_from_path(path, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_directory(tempdir, dataset_reader):
tables, _ = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
_check_dataset_from_path(tempdir, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_list_of_files(tempdir, dataset_reader):
tables, (path1, path2) = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
datasets = [
ds.dataset([path1, path2]),
ds.dataset([str(path1), str(path2)])
]
datasets += [
pickle.loads(pickle.dumps(d)) for d in datasets
]
for dataset in datasets:
assert dataset.schema.equals(table.schema)
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.parquet
def test_open_dataset_filesystem_fspath(tempdir):
# single file
table, path = _create_single_file(tempdir)
fspath = FSProtocolClass(path)
# filesystem inferred from path
dataset1 = ds.dataset(fspath)
assert dataset1.schema.equals(table.schema)
# filesystem specified
dataset2 = ds.dataset(fspath, filesystem=fs.LocalFileSystem())
assert dataset2.schema.equals(table.schema)
# passing different filesystem
with pytest.raises(TypeError):
ds.dataset(fspath, filesystem=fs._MockFileSystem())
def test_construct_from_single_file(tempdir, dataset_reader):
directory = tempdir / 'single-file'
directory.mkdir()
table, path = _create_single_file(directory)
relative_path = path.relative_to(directory)
# instantiate from a single file
d1 = ds.dataset(path)
# instantiate from a single file with a filesystem object
d2 = ds.dataset(path, filesystem=fs.LocalFileSystem())
# instantiate from a single file with prefixed filesystem URI
d3 = ds.dataset(str(relative_path), filesystem=_filesystem_uri(directory))
# pickle roundtrip
d4 = pickle.loads(pickle.dumps(d1))
assert dataset_reader.to_table(d1) == dataset_reader.to_table(
d2) == dataset_reader.to_table(d3) == dataset_reader.to_table(d4)
def test_construct_from_single_directory(tempdir, dataset_reader):
directory = tempdir / 'single-directory'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
d1 = ds.dataset(directory)
d2 = ds.dataset(directory, filesystem=fs.LocalFileSystem())
d3 = ds.dataset(directory.name, filesystem=_filesystem_uri(tempdir))
t1 = dataset_reader.to_table(d1)
t2 = dataset_reader.to_table(d2)
t3 = dataset_reader.to_table(d3)
assert t1 == t2 == t3
# test pickle roundtrip
for d in [d1, d2, d3]:
restored = pickle.loads(pickle.dumps(d))
assert dataset_reader.to_table(restored) == t1
def test_construct_from_list_of_files(tempdir, dataset_reader):
# instantiate from a list of files
directory = tempdir / 'list-of-files'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
relative_paths = [p.relative_to(tempdir) for p in paths]
with change_cwd(tempdir):
d1 = ds.dataset(relative_paths)
t1 = dataset_reader.to_table(d1)
assert len(t1) == sum(map(len, tables))
d2 = ds.dataset(relative_paths, filesystem=_filesystem_uri(tempdir))
t2 = dataset_reader.to_table(d2)
d3 = ds.dataset(paths)
t3 = dataset_reader.to_table(d3)
d4 = ds.dataset(paths, filesystem=fs.LocalFileSystem())
t4 = dataset_reader.to_table(d4)
assert t1 == t2 == t3 == t4
def test_construct_from_list_of_mixed_paths_fails(mockfs):
    # instantiate from a list of mixed (existing and non-existing) paths
files = [
'subdir/1/xxx/file0.parquet',
'subdir/1/xxx/doesnt-exist.parquet',
]
with pytest.raises(FileNotFoundError, match='doesnt-exist'):
ds.dataset(files, filesystem=mockfs)
def test_construct_from_mixed_child_datasets(mockfs):
    # instantiate from a mix of child datasets (file list and directory)
a = ds.dataset(['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet'], filesystem=mockfs)
b = ds.dataset('subdir', filesystem=mockfs)
dataset = ds.dataset([a, b])
assert isinstance(dataset, ds.UnionDataset)
assert len(list(dataset.get_fragments())) == 4
table = dataset.to_table()
assert len(table) == 20
assert table.num_columns == 4
assert len(dataset.children) == 2
for child in dataset.children:
assert child.files == ['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet']
def test_construct_empty_dataset():
empty = ds.dataset([])
table = empty.to_table()
assert table.num_rows == 0
assert table.num_columns == 0
def test_construct_dataset_with_invalid_schema():
empty = ds.dataset([], schema=pa.schema([
('a', pa.int64()),
('a', pa.string())
]))
with pytest.raises(ValueError, match='Multiple matches for .*a.* in '):
empty.to_table()
def test_construct_from_invalid_sources_raise(multisourcefs):
child1 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
child2 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/schema'),
format=ds.ParquetFileFormat()
)
batch1 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
batch2 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["b"])
with pytest.raises(TypeError, match='Expected.*FileSystemDatasetFactory'):
ds.dataset([child1, child2])
expected = (
"Expected a list of path-like or dataset objects, or a list "
"of batches or tables. The given list contains the following "
"types: int"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([1, 2, 3])
expected = (
"Expected a path-like, list of path-likes or a list of Datasets "
"instead of the given type: NoneType"
)
with pytest.raises(TypeError, match=expected):
ds.dataset(None)
expected = (
"Expected a path-like, list of path-likes or a list of Datasets "
"instead of the given type: generator"
)
with pytest.raises(TypeError, match=expected):
ds.dataset((batch1 for _ in range(3)))
expected = (
"Must provide schema to construct in-memory dataset from an empty list"
)
with pytest.raises(ValueError, match=expected):
ds.InMemoryDataset([])
expected = (
"Item has schema\nb: int64\nwhich does not match expected schema\n"
"a: int64"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([batch1, batch2])
expected = (
"Expected a list of path-like or dataset objects, or a list of "
"batches or tables. The given list contains the following types:"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([batch1, 0])
expected = (
"Expected a list of tables or batches. The given list contains a int"
)
with pytest.raises(TypeError, match=expected):
ds.InMemoryDataset([batch1, 0])
def test_construct_in_memory(dataset_reader):
batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
table = pa.Table.from_batches([batch])
assert ds.dataset([], schema=pa.schema([])).to_table() == pa.table([])
for source in (batch, table, [batch], [table]):
dataset = ds.dataset(source)
assert dataset_reader.to_table(dataset) == table
assert len(list(dataset.get_fragments())) == 1
assert next(dataset.get_fragments()).to_table() == table
assert pa.Table.from_batches(list(dataset.to_batches())) == table
@pytest.mark.parametrize('use_threads,use_async',
[(False, False), (False, True),
(True, False), (True, True)])
def test_scan_iterator(use_threads, use_async):
batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
table = pa.Table.from_batches([batch])
# When constructed from readers/iterators, should be one-shot
match = "OneShotFragment was already scanned"
for factory, schema in (
(lambda: pa.ipc.RecordBatchReader.from_batches(
batch.schema, [batch]), None),
(lambda: (batch for _ in range(1)), batch.schema),
):
# Scanning the fragment consumes the underlying iterator
scanner = ds.Scanner.from_batches(
factory(), schema=schema, use_threads=use_threads,
use_async=use_async)
assert scanner.to_table() == table
with pytest.raises(pa.ArrowInvalid, match=match):
scanner.to_table()
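# Helper: write a hive-style partitioned dataset (part=0/1/2 directories,
# three rows each) and return the expected full table including the 'part'
# column.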
def _create_partitioned_dataset(basedir):
import pyarrow.parquet as pq
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = basedir / "dataset-partitioned"
path.mkdir()
for i in range(3):
part = path / "part={}".format(i)
part.mkdir()
pq.write_table(table.slice(3*i, 3), part / "test.parquet")
full_table = table.append_column(
"part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int32()))
return full_table, path
@pytest.mark.parquet
def test_open_dataset_partitioned_directory(tempdir, dataset_reader):
full_table, path = _create_partitioned_dataset(tempdir)
# no partitioning specified, just read all individual files
table = full_table.select(['a', 'b'])
_check_dataset_from_path(path, table, dataset_reader)
# specify partition scheme with discovery
dataset = ds.dataset(
str(path), partitioning=ds.partitioning(flavor="hive"))
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with discovery and relative path
with change_cwd(tempdir):
dataset = ds.dataset("dataset-partitioned/",
partitioning=ds.partitioning(flavor="hive"))
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with string short-cut
dataset = ds.dataset(str(path), partitioning="hive")
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with explicit scheme
dataset = ds.dataset(
str(path),
partitioning=ds.partitioning(
pa.schema([("part", pa.int8())]), flavor="hive"))
expected_schema = table.schema.append(pa.field("part", pa.int8()))
assert dataset.schema.equals(expected_schema)
result = dataset.to_table()
expected = table.append_column(
"part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int8()))
assert result.equals(expected)
@pytest.mark.parquet
def test_open_dataset_filesystem(tempdir):
# single file
table, path = _create_single_file(tempdir)
# filesystem inferred from path
dataset1 = ds.dataset(str(path))
assert dataset1.schema.equals(table.schema)
# filesystem specified
dataset2 = ds.dataset(str(path), filesystem=fs.LocalFileSystem())
assert dataset2.schema.equals(table.schema)
# local filesystem specified with relative path
with change_cwd(tempdir):
dataset3 = ds.dataset("test.parquet", filesystem=fs.LocalFileSystem())
assert dataset3.schema.equals(table.schema)
# passing different filesystem
with pytest.raises(FileNotFoundError):
ds.dataset(str(path), filesystem=fs._MockFileSystem())
@pytest.mark.parquet
def test_open_dataset_unsupported_format(tempdir):
_, path = _create_single_file(tempdir)
with pytest.raises(ValueError, match="format 'blabla' is not supported"):
ds.dataset([path], format="blabla")
@pytest.mark.parquet
def test_open_union_dataset(tempdir, dataset_reader):
_, path = _create_single_file(tempdir)
dataset = ds.dataset(path)
union = ds.dataset([dataset, dataset])
assert isinstance(union, ds.UnionDataset)
pickled = pickle.loads(pickle.dumps(union))
assert dataset_reader.to_table(pickled) == dataset_reader.to_table(union)
def test_open_union_dataset_with_additional_kwargs(multisourcefs):
child = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
with pytest.raises(ValueError, match="cannot pass any additional"):
ds.dataset([child], format="parquet")
def test_open_dataset_non_existing_file():
    # ARROW-8213: opening a dataset with an incorrect local path gives a
    # confusing error message
with pytest.raises(FileNotFoundError):
ds.dataset('i-am-not-existing.parquet', format='parquet')
with pytest.raises(pa.ArrowInvalid, match='cannot be relative'):
ds.dataset('file:i-am-not-existing.parquet', format='parquet')
@pytest.mark.parquet
@pytest.mark.parametrize('partitioning', ["directory", "hive"])
@pytest.mark.parametrize('null_fallback', ['xyz', None])
@pytest.mark.parametrize('infer_dictionary', [False, True])
@pytest.mark.parametrize('partition_keys', [
(["A", "B", "C"], [1, 2, 3]),
([1, 2, 3], ["A", "B", "C"]),
(["A", "B", "C"], ["D", "E", "F"]),
([1, 2, 3], [4, 5, 6]),
([1, None, 3], ["A", "B", "C"]),
([1, 2, 3], ["A", None, "C"]),
([None, 2, 3], [None, 2, 3]),
])
def test_partition_discovery(
tempdir, partitioning, null_fallback, infer_dictionary, partition_keys
):
# ARROW-9288 / ARROW-9476
import pyarrow.parquet as pq
table = pa.table({'a': range(9), 'b': [0.0] * 4 + [1.0] * 5})
has_null = None in partition_keys[0] or None in partition_keys[1]
if partitioning == "directory" and has_null:
        # Directory partitioning can't handle null partition values
return
if partitioning == "directory":
partitioning = ds.DirectoryPartitioning.discover(
["part1", "part2"], infer_dictionary=infer_dictionary)
fmt = "{0}/{1}"
null_value = None
else:
if null_fallback:
partitioning = ds.HivePartitioning.discover(
infer_dictionary=infer_dictionary, null_fallback=null_fallback
)
else:
partitioning = ds.HivePartitioning.discover(
infer_dictionary=infer_dictionary)
fmt = "part1={0}/part2={1}"
if null_fallback:
null_value = null_fallback
else:
null_value = "__HIVE_DEFAULT_PARTITION__"
basepath = tempdir / "dataset"
basepath.mkdir()
part_keys1, part_keys2 = partition_keys
for part1 in part_keys1:
for part2 in part_keys2:
path = basepath / \
fmt.format(part1 or null_value, part2 or null_value)
path.mkdir(parents=True)
pq.write_table(table, path / "test.parquet")
dataset = ds.dataset(str(basepath), partitioning=partitioning)
def expected_type(key):
if infer_dictionary:
value_type = pa.string() if isinstance(key, str) else pa.int32()
return pa.dictionary(pa.int32(), value_type)
else:
return pa.string() if isinstance(key, str) else pa.int32()
expected_schema = table.schema.append(
pa.field("part1", expected_type(part_keys1[0]))
).append(
pa.field("part2", expected_type(part_keys2[0]))
)
assert dataset.schema.equals(expected_schema)
@pytest.mark.pandas
def test_dataset_partitioned_dictionary_type_reconstruct(tempdir):
# https://issues.apache.org/jira/browse/ARROW-11400
table = pa.table({'part': np.repeat(['A', 'B'], 5), 'col': range(10)})
part = ds.partitioning(table.select(['part']).schema, flavor="hive")
ds.write_dataset(table, tempdir, partitioning=part, format="feather")
dataset = ds.dataset(
tempdir, format="feather",
partitioning=ds.HivePartitioning.discover(infer_dictionary=True)
)
expected = pa.table(
{'col': table['col'], 'part': table['part'].dictionary_encode()}
)
assert dataset.to_table().equals(expected)
fragment = list(dataset.get_fragments())[0]
assert fragment.to_table(schema=dataset.schema).equals(expected[:5])
part_expr = fragment.partition_expression
restored = pickle.loads(pickle.dumps(dataset))
assert restored.to_table().equals(expected)
restored = pickle.loads(pickle.dumps(fragment))
assert restored.to_table(schema=dataset.schema).equals(expected[:5])
# to_pandas call triggers computation of the actual dictionary values
assert restored.to_table(schema=dataset.schema).to_pandas().equals(
expected[:5].to_pandas()
)
assert restored.partition_expression.equals(part_expr)
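# Fixture: write a small parquet table to a bucket on the test S3 server and
# return it along with the filesystem, path, URI and connection details.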
@pytest.fixture
def s3_example_simple(s3_server):
from pyarrow.fs import FileSystem
import pyarrow.parquet as pq
host, port, access_key, secret_key = s3_server['connection']
uri = (
"s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}"
.format(access_key, secret_key, host, port)
)
fs, path = FileSystem.from_uri(uri)
fs.create_dir("mybucket")
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream("mybucket/data.parquet") as out:
pq.write_table(table, out)
return table, path, fs, uri, host, port, access_key, secret_key
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_uri_s3(s3_example_simple, dataset_reader):
# open dataset from non-localfs string path
table, path, fs, uri, _, _, _, _ = s3_example_simple
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset_reader.to_table(dataset).equals(table)
# passing filesystem object
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset_reader.to_table(dataset).equals(table)
@pytest.mark.parquet
@pytest.mark.s3 # still needed to create the data
def test_open_dataset_from_uri_s3_fsspec(s3_example_simple):
table, path, _, _, host, port, access_key, secret_key = s3_example_simple
s3fs = pytest.importorskip("s3fs")
from pyarrow.fs import PyFileSystem, FSSpecHandler
fs = s3fs.S3FileSystem(
key=access_key,
secret=secret_key,
client_kwargs={
'endpoint_url': 'http://{}:{}'.format(host, port)
}
)
# passing as fsspec filesystem
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
# directly passing the fsspec-handler
fs = PyFileSystem(FSSpecHandler(fs))
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_s3_with_filesystem_uri(s3_server):
from pyarrow.fs import FileSystem
import pyarrow.parquet as pq
host, port, access_key, secret_key = s3_server['connection']
bucket = 'theirbucket'
path = 'nested/folder/data.parquet'
uri = "s3://{}:{}@{}/{}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, bucket, path, host, port
)
fs, path = FileSystem.from_uri(uri)
assert path == 'theirbucket/nested/folder/data.parquet'
fs.create_dir(bucket)
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream(path) as out:
pq.write_table(table, out)
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset.to_table().equals(table)
    # passing the filesystem as a URI
template = (
"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, host, port
)
)
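    # each (filesystem URI prefix, dataset path) pair below should resolve to
    # the same parquet file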
cases = [
('theirbucket/nested/folder/', '/data.parquet'),
('theirbucket/nested/folder', 'data.parquet'),
('theirbucket/nested/', 'folder/data.parquet'),
('theirbucket/nested', 'folder/data.parquet'),
('theirbucket', '/nested/folder/data.parquet'),
('theirbucket', 'nested/folder/data.parquet'),
]
for prefix, path in cases:
uri = template.format(prefix)
dataset = ds.dataset(path, filesystem=uri, format="parquet")
assert dataset.to_table().equals(table)
with pytest.raises(pa.ArrowInvalid, match='Missing bucket name'):
uri = template.format('/')
ds.dataset('/theirbucket/nested/folder/data.parquet', filesystem=uri)
error = (
"The path component of the filesystem URI must point to a directory "
"but it has a type: `{}`. The path component is `{}` and the given "
"filesystem URI is `{}`"
)
path = 'theirbucket/doesnt/exist'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('NotFound', path, uri)
path = 'theirbucket/nested/folder/data.parquet'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('File', path, uri)
@pytest.mark.parquet
def test_open_dataset_from_fsspec(tempdir):
table, path = _create_single_file(tempdir)
fsspec = pytest.importorskip("fsspec")
localfs = fsspec.filesystem("file")
dataset = ds.dataset(path, filesystem=localfs)
assert dataset.schema.equals(table.schema)
@pytest.mark.pandas
def test_filter_timestamp(tempdir, dataset_reader):
# ARROW-11379
path = tempdir / "test_partition_timestamps"
table = pa.table({
"dates": ['2012-01-01', '2012-01-02'] * 5,
"id": range(10)})
# write dataset partitioned on dates (as strings)
part = ds.partitioning(table.select(['dates']).schema, flavor="hive")
ds.write_dataset(table, path, partitioning=part, format="feather")
# read dataset partitioned on dates (as timestamps)
part = ds.partitioning(pa.schema([("dates", pa.timestamp("s"))]),
flavor="hive")
dataset = ds.dataset(path, format="feather", partitioning=part)
condition = ds.field("dates") > pd.Timestamp("2012-01-01")
table = dataset_reader.to_table(dataset, filter=condition)
assert table.column('id').to_pylist() == [1, 3, 5, 7, 9]
import datetime
condition = ds.field("dates") > datetime.datetime(2012, 1, 1)
table = dataset_reader.to_table(dataset, filter=condition)
assert table.column('id').to_pylist() == [1, 3, 5, 7, 9]
@pytest.mark.parquet
def test_filter_implicit_cast(tempdir, dataset_reader):
# ARROW-7652
table = pa.table({'a': pa.array([0, 1, 2, 3, 4, 5], type=pa.int8())})
_, path = _create_single_file(tempdir, table)
dataset = ds.dataset(str(path))
filter_ = ds.field('a') > 2
assert len(dataset_reader.to_table(dataset, filter=filter_)) == 3
def test_dataset_union(multisourcefs):
child = ds.FileSystemDatasetFactory(
multisourcefs, fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
factory = ds.UnionDatasetFactory([child])
# TODO(bkietz) reintroduce factory.children property
assert len(factory.inspect_schemas()) == 1
assert all(isinstance(s, pa.Schema) for s in factory.inspect_schemas())
assert factory.inspect_schemas()[0].equals(child.inspect())
assert factory.inspect().equals(child.inspect())
assert isinstance(factory.finish(), ds.Dataset)
def test_union_dataset_from_other_datasets(tempdir, multisourcefs):
child1 = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
child2 = ds.dataset('/schema', filesystem=multisourcefs, format='parquet',
partitioning=['week', 'color'])
child3 = ds.dataset('/hive', filesystem=multisourcefs, format='parquet',
partitioning='hive')
assert child1.schema != child2.schema != child3.schema
assembled = ds.dataset([child1, child2, child3])
assert isinstance(assembled, ds.UnionDataset)
msg = 'cannot pass any additional arguments'
with pytest.raises(ValueError, match=msg):
ds.dataset([child1, child2], filesystem=multisourcefs)
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('week', pa.int32()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
assembled = ds.dataset([child1, child3])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('date', pa.date32()),
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('unknown', pa.string()) # fill with nulls
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
# incompatible schemas, date and index columns have conflicting types
table = pa.table([range(9), [0.] * 4 + [1.] * 5, 'abcdefghj'],
names=['date', 'value', 'index'])
_, path = _create_single_file(tempdir, table=table)
child4 = ds.dataset(path)
with pytest.raises(pa.ArrowInvalid, match='Unable to merge'):
ds.dataset([child1, child4])
def test_dataset_from_a_list_of_local_directories_raises(multisourcefs):
msg = 'points to a directory, but only file paths are supported'
with pytest.raises(IsADirectoryError, match=msg):
ds.dataset(['/plain', '/schema', '/hive'], filesystem=multisourcefs)
def test_union_dataset_filesystem_datasets(multisourcefs):
# without partitioning
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs),
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
])
assert dataset.schema.equals(expected_schema)
# with hive partitioning for two hive sources
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs, partitioning='hive')
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert dataset.schema.equals(expected_schema)
@pytest.mark.parquet
def test_specified_schema(tempdir, dataset_reader):
import pyarrow.parquet as pq
table = pa.table({'a': [1, 2, 3], 'b': [.1, .2, .3]})
pq.write_table(table, tempdir / "data.parquet")
def _check_dataset(schema, expected, expected_schema=None):
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
if expected_schema is not None:
assert dataset.schema.equals(expected_schema)
else:
assert dataset.schema.equals(schema)
result = dataset_reader.to_table(dataset)
assert result.equals(expected)
# no schema specified
schema = None
expected = table
_check_dataset(schema, expected, expected_schema=table.schema)
# identical schema specified
schema = table.schema
expected = table
_check_dataset(schema, expected)
    # Specifying schema with changed column order
schema = pa.schema([('b', 'float64'), ('a', 'int64')])
expected = pa.table([[.1, .2, .3], [1, 2, 3]], names=['b', 'a'])
_check_dataset(schema, expected)
# Specifying schema with missing column
schema = pa.schema([('a', 'int64')])
expected = pa.table([[1, 2, 3]], names=['a'])
_check_dataset(schema, expected)
# Specifying schema with additional column
schema = pa.schema([('a', 'int64'), ('c', 'int32')])
expected = pa.table([[1, 2, 3],
pa.array([None, None, None], type='int32')],
names=['a', 'c'])
_check_dataset(schema, expected)
# Specifying with differing field types
schema = pa.schema([('a', 'int32'), ('b', 'float64')])
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
expected = pa.table([table['a'].cast('int32'),
table['b']],
names=['a', 'b'])
_check_dataset(schema, expected)
# Specifying with incompatible schema
schema = pa.schema([('a', pa.list_(pa.int32())), ('b', 'float64')])
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
assert dataset.schema.equals(schema)
with pytest.raises(NotImplementedError,
match='Unsupported cast from int64 to list'):
dataset_reader.to_table(dataset)
@pytest.mark.parquet
def test_incompatible_schema_hang(tempdir, dataset_reader):
# ARROW-13480: deadlock when reading past an errored fragment
import pyarrow.parquet as pq
fn = tempdir / "data.parquet"
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, fn)
schema = pa.schema([('a', pa.null())])
dataset = ds.dataset([str(fn)] * 100, schema=schema)
assert dataset.schema.equals(schema)
scanner = dataset_reader.scanner(dataset)
reader = scanner.to_reader()
with pytest.raises(NotImplementedError,
match='Unsupported cast from int64 to null'):
reader.read_all()
def test_ipc_format(tempdir, dataset_reader):
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.arrow')
with pa.output_stream(path) as sink:
writer = pa.RecordBatchFileWriter(sink, table.schema)
writer.write_batch(table.to_batches()[0])
writer.close()
dataset = ds.dataset(path, format=ds.IpcFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
for format_str in ["ipc", "arrow"]:
dataset = ds.dataset(path, format=format_str)
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.orc
def test_orc_format(tempdir, dataset_reader):
from pyarrow import orc
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.orc')
orc.write_table(table, path)
dataset = ds.dataset(path, format=ds.OrcFileFormat())
result = dataset_reader.to_table(dataset)
result.validate(full=True)
assert result.equals(table)
dataset = ds.dataset(path, format="orc")
result = dataset_reader.to_table(dataset)
result.validate(full=True)
assert result.equals(table)
result = dataset_reader.to_table(dataset, columns=["b"])
result.validate(full=True)
assert result.equals(table.select(["b"]))
assert dataset_reader.count_rows(dataset) == 3
assert dataset_reader.count_rows(dataset, filter=ds.field("a") > 2) == 1
@pytest.mark.orc
def test_orc_scan_options(tempdir, dataset_reader):
from pyarrow import orc
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.orc')
orc.write_table(table, path)
dataset = ds.dataset(path, format="orc")
result = list(dataset_reader.to_batches(dataset))
assert len(result) == 1
assert result[0].num_rows == 3
assert result[0].equals(table.to_batches()[0])
# TODO batch_size is not yet supported (ARROW-14153)
# result = list(dataset_reader.to_batches(dataset, batch_size=2))
# assert len(result) == 2
# assert result[0].num_rows == 2
# assert result[0].equals(table.slice(0, 2).to_batches()[0])
# assert result[1].num_rows == 1
# assert result[1].equals(table.slice(2, 1).to_batches()[0])
def test_orc_format_not_supported():
try:
from pyarrow.dataset import OrcFileFormat # noqa
except (ImportError, AttributeError):
# catch AttributeError for Python 3.6
# ORC is not available, test error message
with pytest.raises(
ValueError, match="not built with support for the ORC file"
):
ds.dataset(".", format="orc")
@pytest.mark.pandas
def test_csv_format(tempdir, dataset_reader):
table = pa.table({'a': pa.array([1, 2, 3], type="int64"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.csv')
table.to_pandas().to_csv(path, index=False)
dataset = ds.dataset(path, format=ds.CsvFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
dataset = ds.dataset(path, format='csv')
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.pandas
@pytest.mark.parametrize("compression", [
"bz2",
"gzip",
"lz4",
"zstd",
])
def test_csv_format_compressed(tempdir, compression, dataset_reader):
if not pyarrow.Codec.is_available(compression):
pytest.skip("{} support is not built".format(compression))
table = pa.table({'a': pa.array([1, 2, 3], type="int64"),
'b': pa.array([.1, .2, .3], type="float64")})
filesystem = fs.LocalFileSystem()
suffix = compression if compression != 'gzip' else 'gz'
path = str(tempdir / f'test.csv.{suffix}')
with filesystem.open_output_stream(path, compression=compression) as sink:
        # https://github.com/pandas-dev/pandas/issues/23854
        # With the CI version of pandas (anything < 1.2), pandas tries to
        # write str to the sink
csv_str = table.to_pandas().to_csv(index=False)
sink.write(csv_str.encode('utf-8'))
dataset = ds.dataset(path, format=ds.CsvFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
def test_csv_format_options(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'w') as sink:
sink.write('skipped\ncol0\nfoo\nbar\n')
dataset = ds.dataset(path, format='csv')
result = dataset_reader.to_table(dataset)
assert result.equals(
pa.table({'skipped': pa.array(['col0', 'foo', 'bar'])}))
dataset = ds.dataset(path, format=ds.CsvFileFormat(
read_options=pa.csv.ReadOptions(skip_rows=1)))
result = dataset_reader.to_table(dataset)
assert result.equals(pa.table({'col0': pa.array(['foo', 'bar'])}))
dataset = ds.dataset(path, format=ds.CsvFileFormat(
read_options=pa.csv.ReadOptions(column_names=['foo'])))
result = dataset_reader.to_table(dataset)
assert result.equals(
pa.table({'foo': pa.array(['skipped', 'col0', 'foo', 'bar'])}))
def test_csv_fragment_options(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'w') as sink:
sink.write('col0\nfoo\nspam\nMYNULL\n')
dataset = ds.dataset(path, format='csv')
convert_options = pyarrow.csv.ConvertOptions(null_values=['MYNULL'],
strings_can_be_null=True)
options = ds.CsvFragmentScanOptions(
convert_options=convert_options,
read_options=pa.csv.ReadOptions(block_size=2**16))
result = dataset_reader.to_table(dataset, fragment_scan_options=options)
assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])}))
csv_format = ds.CsvFileFormat(convert_options=convert_options)
dataset = ds.dataset(path, format=csv_format)
result = dataset_reader.to_table(dataset)
assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])}))
options = ds.CsvFragmentScanOptions()
result = dataset_reader.to_table(dataset, fragment_scan_options=options)
assert result.equals(
pa.table({'col0': pa.array(['foo', 'spam', 'MYNULL'])}))
def test_feather_format(tempdir, dataset_reader):
from pyarrow.feather import write_feather
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
basedir = tempdir / "feather_dataset"
basedir.mkdir()
write_feather(table, str(basedir / "data.feather"))
dataset = ds.dataset(basedir, format=ds.IpcFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
dataset = ds.dataset(basedir, format="feather")
result = dataset_reader.to_table(dataset)
assert result.equals(table)
# ARROW-8641 - column selection order
result = dataset_reader.to_table(dataset, columns=["b", "a"])
assert result.column_names == ["b", "a"]
result = dataset_reader.to_table(dataset, columns=["a", "a"])
assert result.column_names == ["a", "a"]
# error with Feather v1 files
write_feather(table, str(basedir / "data1.feather"), version=1)
with pytest.raises(ValueError):
dataset_reader.to_table(ds.dataset(basedir, format="feather"))
def _create_parquet_dataset_simple(root_path):
"""
Creates a simple (flat files, no nested partitioning) Parquet dataset
"""
import pyarrow.parquet as pq
metadata_collector = []
for i in range(4):
table = pa.table({'f1': [i] * 10, 'f2': np.random.randn(10)})
pq.write_to_dataset(
table, str(root_path), metadata_collector=metadata_collector
)
metadata_path = str(root_path / '_metadata')
# write _metadata file
pq.write_metadata(
table.schema, metadata_path,
metadata_collector=metadata_collector
)
return metadata_path, table
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
def test_parquet_dataset_factory(tempdir):
root_path = tempdir / "test_parquet_dataset"
metadata_path, table = _create_parquet_dataset_simple(root_path)
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
result = dataset.to_table()
assert result.num_rows == 40
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
@pytest.mark.parametrize('use_legacy_dataset', [False, True])
def test_parquet_dataset_factory_roundtrip(tempdir, use_legacy_dataset):
# Simple test to ensure we can roundtrip dataset to
# _metadata/common_metadata and back. A more complex test
# using partitioning will have to wait for ARROW-13269. The
# above test (test_parquet_dataset_factory) will not work
# when legacy is False as there is no "append" equivalent in
# the new dataset until ARROW-12358
import pyarrow.parquet as pq
root_path = tempdir / "test_parquet_dataset"
table = pa.table({'f1': [0] * 10, 'f2': np.random.randn(10)})
metadata_collector = []
pq.write_to_dataset(
table, str(root_path), metadata_collector=metadata_collector,
use_legacy_dataset=use_legacy_dataset
)
metadata_path = str(root_path / '_metadata')
# write _metadata file
pq.write_metadata(
table.schema, metadata_path,
metadata_collector=metadata_collector
)
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
result = dataset.to_table()
assert result.num_rows == 10
@pytest.mark.parquet
def test_parquet_dataset_factory_order(tempdir):
# The order of the fragments in the dataset should match the order of the
# row groups in the _metadata file.
import pyarrow.parquet as pq
metadatas = []
    # Create a dataset where f1 increments from 0 to 99, spread across
    # 10 files. Put the row groups in the correct order in _metadata
for i in range(10):
table = pa.table(
{'f1': list(range(i*10, (i+1)*10))})
table_path = tempdir / f'{i}.parquet'
pq.write_table(table, table_path, metadata_collector=metadatas)
metadatas[-1].set_file_path(f'{i}.parquet')
metadata_path = str(tempdir / '_metadata')
pq.write_metadata(table.schema, metadata_path, metadatas)
dataset = ds.parquet_dataset(metadata_path)
    # Ensure the table contains the values 0 through 99 in the right order
scanned_table = dataset.to_table()
scanned_col = scanned_table.column('f1').to_pylist()
assert scanned_col == list(range(0, 100))
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_invalid(tempdir):
root_path = tempdir / "test_parquet_dataset_invalid"
metadata_path, table = _create_parquet_dataset_simple(root_path)
# remove one of the files
list(root_path.glob("*.parquet"))[0].unlink()
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
with pytest.raises(FileNotFoundError):
dataset.to_table()
def _create_metadata_file(root_path):
# create _metadata file from existing parquet dataset
import pyarrow.parquet as pq
parquet_paths = list(sorted(root_path.rglob("*.parquet")))
schema = pq.ParquetFile(parquet_paths[0]).schema.to_arrow_schema()
metadata_collector = []
for path in parquet_paths:
metadata = pq.ParquetFile(path).metadata
metadata.set_file_path(str(path.relative_to(root_path)))
metadata_collector.append(metadata)
metadata_path = root_path / "_metadata"
pq.write_metadata(
schema, metadata_path, metadata_collector=metadata_collector
)
return metadata_path
def _create_parquet_dataset_partitioned(root_path):
import pyarrow.parquet as pq
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
table = table.replace_schema_metadata({"key": "value"})
pq.write_to_dataset(table, str(root_path), partition_cols=['part'])
return _create_metadata_file(root_path), table
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_partitioned(tempdir):
root_path = tempdir / "test_parquet_dataset_factory_partitioned"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.parquet_dataset(metadata_path, partitioning=partitioning)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 2
result = dataset.to_table()
assert result.num_rows == 20
# the partitioned dataset does not preserve order
result = result.to_pandas().sort_values("f1").reset_index(drop=True)
expected = table.to_pandas()
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_metadata(tempdir):
# ensure ParquetDatasetFactory preserves metadata (ARROW-9363)
root_path = tempdir / "test_parquet_dataset_factory_metadata"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
dataset = ds.parquet_dataset(metadata_path, partitioning="hive")
assert dataset.schema.equals(table.schema)
assert b"key" in dataset.schema.metadata
fragments = list(dataset.get_fragments())
assert b"key" in fragments[0].physical_schema.metadata
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_lazy_filtering(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
# Test to ensure that no IO happens when filtering a dataset
# created with ParquetDatasetFactory from a _metadata file
root_path = tempdir / "test_parquet_dataset_lazy_filtering"
metadata_path, _ = _create_parquet_dataset_simple(root_path)
# creating the dataset should only open the metadata file
with assert_opens([metadata_path]):
dataset = ds.parquet_dataset(
metadata_path,
partitioning=ds.partitioning(flavor="hive"),
filesystem=fs)
# materializing fragments should not open any file
with assert_opens([]):
fragments = list(dataset.get_fragments())
# filtering fragments should not open any file
with assert_opens([]):
list(dataset.get_fragments(ds.field("f1") > 15))
# splitting by row group should still not open any file
with assert_opens([]):
fragments[0].split_by_row_group(ds.field("f1") > 15)
    # ensuring metadata of a split fragment should also not open any file
with assert_opens([]):
rg_fragments = fragments[0].split_by_row_group()
rg_fragments[0].ensure_complete_metadata()
# FIXME(bkietz) on Windows this results in FileNotFoundErrors.
# but actually scanning does open files
# with assert_opens([f.path for f in fragments]):
# dataset.to_table()
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_schema_metadata(tempdir, dataset_reader):
# ARROW-8802
df = pd.DataFrame({'a': [1, 2, 3]})
path = tempdir / "test.parquet"
df.to_parquet(path)
dataset = ds.dataset(path)
schema = dataset_reader.to_table(dataset).schema
projected_schema = dataset_reader.to_table(dataset, columns=["a"]).schema
# ensure the pandas metadata is included in the schema
assert b"pandas" in schema.metadata
# ensure it is still there in a projected schema (with column selection)
assert schema.equals(projected_schema, check_metadata=True)
@pytest.mark.parquet
def test_filter_mismatching_schema(tempdir, dataset_reader):
# ARROW-9146
import pyarrow.parquet as pq
table = pa.table({"col": pa.array([1, 2, 3, 4], type='int32')})
pq.write_table(table, str(tempdir / "data.parquet"))
# specifying explicit schema, but that mismatches the schema of the data
schema = pa.schema([("col", pa.int64())])
dataset = ds.dataset(
tempdir / "data.parquet", format="parquet", schema=schema)
# filtering on a column with such type mismatch should implicitly
# cast the column
filtered = dataset_reader.to_table(dataset, filter=ds.field("col") > 2)
assert filtered["col"].equals(table["col"].cast('int64').slice(2))
fragment = list(dataset.get_fragments())[0]
filtered = dataset_reader.to_table(
fragment, filter=ds.field("col") > 2, schema=schema)
assert filtered["col"].equals(table["col"].cast('int64').slice(2))
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_only_partition_columns(tempdir, dataset_reader):
# ARROW-8729
import pyarrow.parquet as pq
table = pa.table({'part': 'a a b b'.split(), 'col': list(range(4))})
path = str(tempdir / 'test_dataset')
pq.write_to_dataset(table, path, partition_cols=['part'])
dataset = ds.dataset(path, partitioning='hive')
all_cols = dataset_reader.to_table(dataset)
part_only = dataset_reader.to_table(dataset, columns=['part'])
assert all_cols.column('part').equals(part_only.column('part'))
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_null_column(tempdir, dataset_reader):
import pandas as pd
df = pd.DataFrame({"col": np.array([None, None, None], dtype='object')})
f = tempdir / "test_dataset_project_null_column.parquet"
df.to_parquet(f, engine="pyarrow")
dataset = ds.dataset(f, format="parquet",
schema=pa.schema([("col", pa.int64())]))
expected = pa.table({'col': pa.array([None, None, None], pa.int64())})
assert dataset_reader.to_table(dataset).equals(expected)
def test_dataset_project_columns(tempdir, dataset_reader):
# basic column re-projection with expressions
from pyarrow import feather
table = pa.table({"A": [1, 2, 3], "B": [1., 2., 3.], "C": ["a", "b", "c"]})
feather.write_feather(table, tempdir / "data.feather")
dataset = ds.dataset(tempdir / "data.feather", format="feather")
result = dataset_reader.to_table(dataset, columns={
'A_renamed': ds.field('A'),
'B_as_int': ds.field('B').cast("int32", safe=False),
'C_is_a': ds.field('C') == 'a'
})
expected = pa.table({
"A_renamed": [1, 2, 3],
"B_as_int": pa.array([1, 2, 3], type="int32"),
"C_is_a": [True, False, False],
})
assert result.equals(expected)
# raise proper error when not passing an expression
with pytest.raises(TypeError, match="Expected an Expression"):
dataset_reader.to_table(dataset, columns={"A": "A"})
@pytest.mark.pandas
@pytest.mark.parquet
def test_dataset_preserved_partitioning(tempdir):
# ARROW-8655
# through discovery, but without partitioning
_, path = _create_single_file(tempdir)
dataset = ds.dataset(path)
assert dataset.partitioning is None
# through discovery, with hive partitioning but not specified
full_table, path = _create_partitioned_dataset(tempdir)
dataset = ds.dataset(path)
assert dataset.partitioning is None
# through discovery, with hive partitioning (from a partitioning factory)
dataset = ds.dataset(path, partitioning="hive")
part = dataset.partitioning
assert part is not None
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.int32())])
assert len(part.dictionaries) == 1
assert part.dictionaries[0] == pa.array([0, 1, 2], pa.int32())
# through discovery, with hive partitioning (from a partitioning object)
part = ds.partitioning(pa.schema([("part", pa.int32())]), flavor="hive")
assert isinstance(part, ds.HivePartitioning) # not a factory
assert part.dictionaries is None
dataset = ds.dataset(path, partitioning=part)
part = dataset.partitioning
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.int32())])
# TODO is this expected?
assert part.dictionaries is None
# through manual creation -> not available
dataset = ds.dataset(path, partitioning="hive")
dataset2 = ds.FileSystemDataset(
list(dataset.get_fragments()), schema=dataset.schema,
format=dataset.format, filesystem=dataset.filesystem
)
assert dataset2.partitioning is None
# through discovery with ParquetDatasetFactory
root_path = tempdir / "data-partitioned-metadata"
metadata_path, _ = _create_parquet_dataset_partitioned(root_path)
dataset = ds.parquet_dataset(metadata_path, partitioning="hive")
part = dataset.partitioning
assert part is not None
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.string())])
assert len(part.dictionaries) == 1
# will be fixed by ARROW-13153 (order is not preserved at the moment)
# assert part.dictionaries[0] == pa.array(["a", "b"], pa.string())
assert set(part.dictionaries[0].to_pylist()) == {"a", "b"}
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_given_null_just_works(tempdir):
import pyarrow.parquet as pq
schema = pa.schema([
pa.field('col', pa.int64()),
pa.field('part', pa.dictionary(pa.int32(), pa.string()))
])
table = pa.table({'part': [None, None, 'a', 'a'],
'col': list(range(4))}, schema=schema)
path = str(tempdir / 'test_dataset')
    pq.write_to_dataset(table, path, partition_cols=['part'],
                        use_legacy_dataset=False)
actual_table = pq.read_table(tempdir / 'test_dataset')
# column.equals can handle the difference in chunking but not the fact
# that `part` will have different dictionaries for the two chunks
    assert (actual_table.column('part').to_pylist() ==
            table.column('part').to_pylist())
assert actual_table.column('col').equals(table.column('col'))
@pytest.mark.parquet
@pytest.mark.pandas
def test_legacy_write_to_dataset_drops_null(tempdir):
import pyarrow.parquet as pq
schema = pa.schema([
pa.field('col', pa.int64()),
pa.field('part', pa.dictionary(pa.int32(), pa.string()))
])
table = pa.table({'part': ['a', 'a', None, None],
'col': list(range(4))}, schema=schema)
expected = pa.table(
{'part': ['a', 'a'], 'col': list(range(2))}, schema=schema)
path = str(tempdir / 'test_dataset')
    pq.write_to_dataset(table, path, partition_cols=['part'],
                        use_legacy_dataset=True)
actual = pq.read_table(tempdir / 'test_dataset')
assert actual == expected
def _sort_table(tab, sort_col):
import pyarrow.compute as pc
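    # written datasets (especially partitioned ones) do not guarantee row
    # order, so sort on a key column before comparing tables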
sorted_indices = pc.sort_indices(
tab, options=pc.SortOptions([(sort_col, 'ascending')]))
return pc.take(tab, sorted_indices)
def _check_dataset_roundtrip(dataset, base_dir, expected_files, sort_col,
base_dir_path=None, partitioning=None):
base_dir_path = base_dir_path or base_dir
ds.write_dataset(dataset, base_dir, format="feather",
partitioning=partitioning, use_threads=False)
# check that all files are present
file_paths = list(base_dir_path.rglob("*"))
assert set(file_paths) == set(expected_files)
# check that reading back in as dataset gives the same result
dataset2 = ds.dataset(
base_dir_path, format="feather", partitioning=partitioning)
assert _sort_table(dataset2.to_table(), sort_col).equals(
_sort_table(dataset.to_table(), sort_col))
@pytest.mark.parquet
def test_write_dataset(tempdir):
# manually create a written dataset and read as dataset object
directory = tempdir / 'single-file'
directory.mkdir()
_ = _create_single_file(directory)
dataset = ds.dataset(directory)
# full string path
target = tempdir / 'single-file-target'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target)
# pathlib path object
target = tempdir / 'single-file-target2'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, target, expected_files, 'a', target)
# TODO
# # relative path
# target = tempdir / 'single-file-target3'
# expected_files = [target / "part-0.ipc"]
# _check_dataset_roundtrip(
# dataset, './single-file-target3', expected_files, target)
# Directory of files
directory = tempdir / 'single-directory'
directory.mkdir()
_ = _create_directory_of_files(directory)
dataset = ds.dataset(directory)
target = tempdir / 'single-directory-target'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_partitioned(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.dataset(directory, partitioning=partitioning)
# hive partitioning
target = tempdir / 'partitioned-hive-target'
expected_paths = [
target / "part=a", target / "part=a" / "part-0.feather",
target / "part=b", target / "part=b" / "part-0.feather"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning_schema)
# directory partitioning
target = tempdir / 'partitioned-dir-target'
expected_paths = [
target / "a", target / "a" / "part-0.feather",
target / "b", target / "b" / "part-0.feather"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]))
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning_schema)
def test_write_dataset_with_field_names(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
ds.write_dataset(table, tempdir, format='parquet',
partitioning=["b"])
load_back = ds.dataset(tempdir, partitioning=["b"])
files = load_back.files
partitioning_dirs = {
str(pathlib.Path(f).relative_to(tempdir).parent) for f in files
}
assert partitioning_dirs == {"x", "y", "z"}
load_back_table = load_back.to_table()
assert load_back_table.equals(table)
def test_write_dataset_with_field_names_hive(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
ds.write_dataset(table, tempdir, format='parquet',
partitioning=["b"], partitioning_flavor="hive")
load_back = ds.dataset(tempdir, partitioning="hive")
files = load_back.files
partitioning_dirs = {
str(pathlib.Path(f).relative_to(tempdir).parent) for f in files
}
assert partitioning_dirs == {"b=x", "b=y", "b=z"}
load_back_table = load_back.to_table()
assert load_back_table.equals(table)
def test_write_dataset_with_scanner(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z'],
'c': [1, 2, 3]})
ds.write_dataset(table, tempdir, format='parquet',
partitioning=["b"])
dataset = ds.dataset(tempdir, partitioning=["b"])
with tempfile.TemporaryDirectory() as tempdir2:
ds.write_dataset(dataset.scanner(columns=["b", "c"], use_async=True),
tempdir2, format='parquet', partitioning=["b"])
load_back = ds.dataset(tempdir2, partitioning=["b"])
load_back_table = load_back.to_table()
        assert (dict(load_back_table.to_pydict()) ==
                table.drop(["a"]).to_pydict())
def test_write_dataset_with_dataset(tempdir):
table = pa.table({'b': ['x', 'y', 'z'], 'c': [1, 2, 3]})
ds.write_dataset(table, tempdir, format='parquet',
partitioning=["b"])
dataset = ds.dataset(tempdir, partitioning=["b"])
with tempfile.TemporaryDirectory() as tempdir2:
ds.write_dataset(dataset, tempdir2,
format='parquet', partitioning=["b"])
load_back = ds.dataset(tempdir2, partitioning=["b"])
load_back_table = load_back.to_table()
assert dict(load_back_table.to_pydict()) == table.to_pydict()
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_partitioned_dict(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
# directory partitioning, dictionary partition columns
dataset = ds.dataset(
directory,
partitioning=ds.HivePartitioning.discover(infer_dictionary=True))
target = tempdir / 'partitioned-dir-target'
expected_paths = [
target / "a", target / "a" / "part-0.feather",
target / "b", target / "b" / "part-0.feather"
]
partitioning = ds.partitioning(pa.schema([
dataset.schema.field('part')]),
dictionaries={'part': pa.array(['a', 'b'])})
# NB: dictionaries required here since we use partitioning to parse
# directories in _check_dataset_roundtrip (not currently required for
# the formatting step)
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_use_threads(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
dataset = ds.dataset(directory, partitioning="hive")
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
target1 = tempdir / 'partitioned1'
paths_written = []
def file_visitor(written_file):
paths_written.append(written_file.path)
ds.write_dataset(
dataset, target1, format="feather", partitioning=partitioning,
use_threads=True, file_visitor=file_visitor
)
expected_paths = {
target1 / 'part=a' / 'part-0.feather',
target1 / 'part=b' / 'part-0.feather'
}
paths_written_set = set(map(pathlib.Path, paths_written))
assert paths_written_set == expected_paths
target2 = tempdir / 'partitioned2'
ds.write_dataset(
dataset, target2, format="feather", partitioning=partitioning,
use_threads=False
)
# check that reading in gives same result
result1 = ds.dataset(target1, format="feather", partitioning=partitioning)
result2 = ds.dataset(target2, format="feather", partitioning=partitioning)
assert result1.to_table().equals(result2.to_table())
def test_write_table(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir,
basename_template='dat_{i}.arrow', format="feather")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "dat_0.arrow"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
# with partitioning
base_dir = tempdir / 'partitioned'
expected_paths = [
base_dir / "part=a", base_dir / "part=a" / "dat_0.arrow",
base_dir / "part=b", base_dir / "part=b" / "dat_0.arrow"
]
visited_paths = []
def file_visitor(written_file):
visited_paths.append(written_file.path)
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
ds.write_dataset(table, base_dir, format="feather",
basename_template='dat_{i}.arrow',
partitioning=partitioning, file_visitor=file_visitor)
file_paths = list(base_dir.rglob("*"))
assert set(file_paths) == set(expected_paths)
result = ds.dataset(base_dir, format="ipc", partitioning=partitioning)
assert result.to_table().equals(table)
assert len(visited_paths) == 2
for visited_path in visited_paths:
assert pathlib.Path(visited_path) in expected_paths
def test_write_table_multiple_fragments(tempdir):
table = pa.table([
pa.array(range(10)), pa.array(np.random.randn(10)),
pa.array(np.repeat(['a', 'b'], 5))
], names=["f1", "f2", "part"])
table = pa.concat_tables([table]*2)
# Table with multiple batches written as single Fragment by default
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Same for single-element list of Table
base_dir = tempdir / 'single-list'
ds.write_dataset([table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of batches to write multiple fragments
base_dir = tempdir / 'multiple'
ds.write_dataset(table.to_batches(), base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of tables to write multiple fragments
base_dir = tempdir / 'multiple-table'
ds.write_dataset([table, table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(
pa.concat_tables([table]*2)
)
def test_write_iterable(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
base_dir = tempdir / 'inmemory_iterable'
ds.write_dataset((batch for batch in table.to_batches()), base_dir,
schema=table.schema,
basename_template='dat_{i}.arrow', format="feather")
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
base_dir = tempdir / 'inmemory_reader'
reader = pa.ipc.RecordBatchReader.from_batches(table.schema,
table.to_batches())
ds.write_dataset(reader, base_dir,
basename_template='dat_{i}.arrow', format="feather")
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
def test_write_scanner(tempdir, dataset_reader):
if not dataset_reader.use_async:
pytest.skip(
('ARROW-13338: Write dataset with scanner does not'
' support synchronous scan'))
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
dataset = ds.dataset(table)
base_dir = tempdir / 'dataset_from_scanner'
    ds.write_dataset(dataset_reader.scanner(dataset), base_dir,
                     format="feather")
result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc"))
assert result.equals(table)
# scanner with different projected_schema
base_dir = tempdir / 'dataset_from_scanner2'
ds.write_dataset(dataset_reader.scanner(dataset, columns=["f1"]),
base_dir, format="feather")
result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc"))
assert result.equals(table.select(["f1"]))
# schema not allowed when writing a scanner
with pytest.raises(ValueError, match="Cannot specify a schema"):
ds.write_dataset(dataset_reader.scanner(dataset), base_dir,
schema=table.schema, format="feather")
def test_write_table_partitioned_dict(tempdir):
# ensure writing table partitioned on a dictionary column works without
# specifying the dictionary values explicitly
table = pa.table([
pa.array(range(20)),
pa.array(np.repeat(['a', 'b'], 10)).dictionary_encode(),
], names=['col', 'part'])
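    # the partitioning schema is taken from the table itself, so 'part' keeps
    # its dictionary type without its dictionary values being listed explicitly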
partitioning = ds.partitioning(table.select(["part"]).schema)
base_dir = tempdir / "dataset"
ds.write_dataset(
table, base_dir, format="feather", partitioning=partitioning
)
# check roundtrip
partitioning_read = ds.DirectoryPartitioning.discover(
["part"], infer_dictionary=True)
result = ds.dataset(
base_dir, format="ipc", partitioning=partitioning_read
).to_table()
assert result.equals(table)
@pytest.mark.parquet
def test_write_dataset_parquet(tempdir):
import pyarrow.parquet as pq
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
# using default "parquet" format string
base_dir = tempdir / 'parquet_dataset'
ds.write_dataset(table, base_dir, format="parquet")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "part-0.parquet"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="parquet").to_table()
assert result.equals(table)
# using custom options
for version in ["1.0", "2.4", "2.6"]:
format = ds.ParquetFileFormat()
opts = format.make_write_options(version=version)
base_dir = tempdir / 'parquet_dataset_version{0}'.format(version)
ds.write_dataset(table, base_dir, format=format, file_options=opts)
meta = pq.read_metadata(base_dir / "part-0.parquet")
expected_version = "1.0" if version == "1.0" else "2.6"
assert meta.format_version == expected_version
def test_write_dataset_csv(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "chr1"])
base_dir = tempdir / 'csv_dataset'
ds.write_dataset(table, base_dir, format="csv")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "part-0.csv"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="csv").to_table()
assert result.equals(table)
# using custom options
format = ds.CsvFileFormat(read_options=pyarrow.csv.ReadOptions(
column_names=table.schema.names))
opts = format.make_write_options(include_header=False)
base_dir = tempdir / 'csv_dataset_noheader'
ds.write_dataset(table, base_dir, format=format, file_options=opts)
result = ds.dataset(base_dir, format=format).to_table()
assert result.equals(table)
@pytest.mark.parquet
def test_write_dataset_parquet_file_visitor(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
visitor_called = False
def file_visitor(written_file):
nonlocal visitor_called
if (written_file.metadata is not None and
written_file.metadata.num_columns == 3):
visitor_called = True
base_dir = tempdir / 'parquet_dataset'
ds.write_dataset(table, base_dir, format="parquet",
file_visitor=file_visitor)
assert visitor_called
def test_partition_dataset_parquet_file_visitor(tempdir):
f1_vals = [item for chunk in range(4) for item in [chunk] * 10]
f2_vals = [item*10 for chunk in range(4) for item in [chunk] * 10]
table = pa.table({'f1': f1_vals, 'f2': f2_vals,
'part': np.repeat(['a', 'b'], 20)})
root_path = tempdir / 'partitioned'
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
paths_written = []
sample_metadata = None
def file_visitor(written_file):
nonlocal sample_metadata
if written_file.metadata:
sample_metadata = written_file.metadata
paths_written.append(written_file.path)
ds.write_dataset(
table, root_path, format="parquet", partitioning=partitioning,
use_threads=True, file_visitor=file_visitor
)
expected_paths = {
root_path / 'part=a' / 'part-0.parquet',
root_path / 'part=b' / 'part-0.parquet'
}
paths_written_set = set(map(pathlib.Path, paths_written))
assert paths_written_set == expected_paths
assert sample_metadata is not None
assert sample_metadata.num_columns == 2
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_arrow_schema_metadata(tempdir):
# ensure we serialize ARROW schema in the parquet metadata, to have a
# correct roundtrip (e.g. preserve non-UTC timezone)
import pyarrow.parquet as pq
table = pa.table({"a": [pd.Timestamp("2012-01-01", tz="Europe/Brussels")]})
assert table["a"].type.tz == "Europe/Brussels"
ds.write_dataset(table, tempdir, format="parquet")
result = pq.read_table(tempdir / "part-0.parquet")
assert result["a"].type.tz == "Europe/Brussels"
def test_write_dataset_schema_metadata(tempdir):
# ensure that schema metadata gets written
from pyarrow import feather
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="feather")
schema = feather.read_table(tempdir / "part-0.feather").schema
assert schema.metadata == {b'key': b'value'}
@pytest.mark.parquet
def test_write_dataset_schema_metadata_parquet(tempdir):
# ensure that schema metadata gets written
import pyarrow.parquet as pq
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="parquet")
schema = pq.read_table(tempdir / "part-0.parquet").schema
assert schema.metadata == {b'key': b'value'}
@pytest.mark.parquet
@pytest.mark.s3
def test_write_dataset_s3(s3_example_simple):
# write dataset with s3 filesystem
_, _, fs, _, host, port, access_key, secret_key = s3_example_simple
uri_template = (
"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, host, port)
)
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
part = ds.partitioning(pa.schema([("part", pa.string())]), flavor="hive")
# writing with filesystem object
ds.write_dataset(
table, "mybucket/dataset", filesystem=fs, format="feather",
partitioning=part
)
    # check roundtrip
result = ds.dataset(
"mybucket/dataset", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# writing with URI
uri = uri_template.format("mybucket/dataset2")
ds.write_dataset(table, uri, format="feather", partitioning=part)
    # check roundtrip
result = ds.dataset(
"mybucket/dataset2", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# writing with path + URI as filesystem
uri = uri_template.format("mybucket")
ds.write_dataset(
table, "dataset3", filesystem=uri, format="feather", partitioning=part
)
    # check roundtrip
result = ds.dataset(
"mybucket/dataset3", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
@pytest.mark.parquet
def test_dataset_null_to_dictionary_cast(tempdir, dataset_reader):
# ARROW-12420
import pyarrow.parquet as pq
table = pa.table({"a": [None, None]})
pq.write_table(table, tempdir / "test.parquet")
schema = pa.schema([
pa.field("a", pa.dictionary(pa.int32(), pa.string()))
])
fsds = ds.FileSystemDataset.from_paths(
paths=[tempdir / "test.parquet"],
schema=schema,
format=ds.ParquetFileFormat(),
filesystem=fs.LocalFileSystem(),
)
table = dataset_reader.to_table(fsds)
assert table.schema == schema
|
/**
* Copyright © CM.com. All rights reserved.
* See LICENSE.txt for license details.
*/
define([
'uiComponent',
'jquery'
], function (
Component,
$
) {
'use strict';
return Component.extend({
mainContainerSelector: null,
/**
* Initialization of component
*
* @returns {Object}
*/
initialize: function () {
this._super();
let self = this;
$(this.mainContainerSelector).find('select').each(function() {
if ($(this).attr('id').indexOf('allowspecific') !== -1) {
self.initChildValues($(this));
}
if ($(this).attr('id').indexOf('allow_specific_currency') !== -1) {
self.initChildValues($(this));
$(this).change(function (event) {
self.initChildValues($(event.target));
});
}
})
return this;
},
/**
* Init of child values (specific countries, currencies)
*
* @param {HTMLElement} element
*/
initChildValues: function (element) {
let childValuesContainer = element.closest('tr').nextAll('tr')[0];
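            // a value of '0' means the generic option (allow all) is selected,
            // so the specific-values select in the following table row is
            // disabled; any other value re-enables it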
if (element.val() == '0') {
$(childValuesContainer).find('select').attr('disabled', true);
} else {
$(childValuesContainer).find('select').attr('disabled', false);
}
}
});
})
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD MobilenetV1 FPN Feature Extractor."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Mobilenet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v1 layers
{Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise,
Conv2d_13_pointwise}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDMobileNetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('fpn', reuse=self._reuse_weights):
feature_blocks = [
'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
'Conv2d_13_pointwise'
]
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
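        # FPN levels follow the numbering in the class docstring: level 2 maps
        # to Conv2d_3_pointwise (index 0 in feature_blocks), hence `level - 2`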
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append(feature_blocks[level - 2])
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=depth_fn(256))
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
feature_blocks[base_fpn_max_level - 2])]
# Construct coarse features
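        # levels beyond the deepest backbone map are synthesized with stride-2
        # 3x3 convs on the previous map; the scope numbering continues after
        # Conv2d_13 (bottom_up_Conv2d_14, bottom_up_Conv2d_15, ...)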
for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
last_feature_map = slim.conv2d(
last_feature_map,
num_outputs=depth_fn(256),
kernel_size=[3, 3],
stride=2,
padding='SAME',
scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
feature_maps.append(last_feature_map)
return feature_maps
|
/*!
* remark (http://getbootstrapadmin.com/remark)
* Copyright 2017 amazingsurge
* Licensed under the Themeforest Standard Licenses
*/
(function(document, window, $) {
'use strict';
var Site = window.Site;
$(document).ready(function($) {
Site.run();
var $example = $('#exampleTransition');
$(document).on('click.panel.transition', '[data-type]', function() {
var type = $(this).data('type');
$example.data('animateList').run(type);
});
$(document).on('close.uikit.panel', '[class*=blocks-] > li > .panel', function() {
$(this).parent().hide();
});
});
})(document, window, jQuery);
|
# -*- coding: utf-8 -*-
"""Test for tmuxp configuration import, inlining, expanding and export."""
from __future__ import absolute_import, unicode_literals
import os
import pytest
import kaptan
from tmuxp import config, exc
from . import example_dir
from .fixtures import config as fixtures
TMUXP_DIR = os.path.join(os.path.dirname(__file__), '.tmuxp')
def load_yaml(yaml):
return kaptan.Kaptan(handler='yaml').import_config(yaml).get()
def load_config(_file):
return kaptan.Kaptan().import_config(_file).get()
def test_export_json(tmpdir):
json_config_file = tmpdir.join('config.json')
configparser = kaptan.Kaptan()
configparser.import_config(fixtures.sampleconfig.sampleconfigdict)
json_config_data = configparser.export('json', indent=2)
json_config_file.write(json_config_data)
new_config = kaptan.Kaptan()
new_config_data = new_config.import_config(str(json_config_file)).get()
assert fixtures.sampleconfig.sampleconfigdict == new_config_data
def test_export_yaml(tmpdir):
yaml_config_file = tmpdir.join('config.yaml')
configparser = kaptan.Kaptan()
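    # inline() collapses the config into its short form (e.g. a pane with a
    # single shell command becomes just that command string) before exporting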
sampleconfig = config.inline(fixtures.sampleconfig.sampleconfigdict)
configparser.import_config(sampleconfig)
yaml_config_data = configparser.export('yaml', indent=2, default_flow_style=False)
yaml_config_file.write(yaml_config_data)
new_config_data = load_config(str(yaml_config_file))
assert fixtures.sampleconfig.sampleconfigdict == new_config_data
def test_scan_config(tmpdir):
configs = []
garbage_file = tmpdir.join('config.psd')
garbage_file.write('wat')
for r, d, f in os.walk(str(tmpdir)):
        for filela in (x for x in f if x.endswith(('.json', '.ini', '.yaml'))):
configs.append(str(tmpdir.join(filela)))
files = 0
if tmpdir.join('config.json').check():
files += 1
assert str(tmpdir.join('config.json')) in configs
if tmpdir.join('config.yaml').check():
files += 1
assert str(tmpdir.join('config.yaml')) in configs
if tmpdir.join('config.ini').check():
files += 1
assert str(tmpdir.join('config.ini')) in configs
assert len(configs) == files
def test_config_expand1():
"""Expand shell commands from string to list."""
test_config = config.expand(fixtures.expand1.before_config)
assert test_config == fixtures.expand1.after_config
def test_config_expand2():
"""Expand shell commands from string to list."""
unexpanded_dict = load_yaml(fixtures.expand2.unexpanded_yaml)
expanded_dict = load_yaml(fixtures.expand2.expanded_yaml)
assert config.expand(unexpanded_dict) == expanded_dict
"""Tests for :meth:`config.inline()`."""
ibefore_config = { # inline config
'session_name': 'sampleconfig',
'start_directory': '~',
'windows': [
{
'shell_command': ['top'],
'window_name': 'editor',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
iafter_config = {
'session_name': 'sampleconfig',
'start_directory': '~',
'windows': [
{
'shell_command': 'top',
'window_name': 'editor',
'panes': ['vim', 'cowsay "hey"'],
'layout': 'main-verticle',
},
{'window_name': 'logging', 'panes': ['tail -F /var/log/syslog']},
{'options': {'automatic-rename': True}, 'panes': ['htop']},
],
}
def test_inline_config():
""":meth:`config.inline()` shell commands list to string."""
test_config = config.inline(ibefore_config)
assert test_config == iafter_config
"""Test config inheritance for the nested 'start_command'."""
inheritance_config_before = {
'session_name': 'sampleconfig',
'start_directory': '/',
'windows': [
{
'window_name': 'editor',
'start_directory': '~',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'window_name': 'shufu', 'panes': [{'shell_command': ['htop']}]},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
inheritance_config_after = {
'session_name': 'sampleconfig',
'start_directory': '/',
'windows': [
{
'window_name': 'editor',
'start_directory': '~',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'window_name': 'shufu', 'panes': [{'shell_command': ['htop']}]},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
def test_inheritance_config():
config = inheritance_config_before
# TODO: Look at verifying window_start_directory
# if 'start_directory' in config:
# session_start_directory = config['start_directory']
# else:
# session_start_directory = None
# for windowconfitem in config['windows']:
# window_start_directory = None
#
# if 'start_directory' in windowconfitem:
# window_start_directory = windowconfitem['start_directory']
# elif session_start_directory:
# window_start_directory = session_start_directory
#
# for paneconfitem in windowconfitem['panes']:
# if 'start_directory' in paneconfitem:
# pane_start_directory = paneconfitem['start_directory']
# elif window_start_directory:
# paneconfitem['start_directory'] = window_start_directory
# elif session_start_directory:
# paneconfitem['start_directory'] = session_start_directory
assert config == inheritance_config_after
def test_shell_command_before():
"""Config inheritance for the nested 'start_command'."""
test_config = fixtures.shell_command_before.config_unexpanded
test_config = config.expand(test_config)
assert test_config == fixtures.shell_command_before.config_expanded
test_config = config.trickle(test_config)
assert test_config == fixtures.shell_command_before.config_after
def test_in_session_scope():
sconfig = load_yaml(fixtures.shell_command_before_session.before)
config.validate_schema(sconfig)
assert config.expand(sconfig) == sconfig
assert config.expand(config.trickle(sconfig)) == load_yaml(
fixtures.shell_command_before_session.expected
)
def test_trickle_relative_start_directory():
test_config = config.trickle(fixtures.trickle.before)
assert test_config == fixtures.trickle.expected
def test_trickle_window_with_no_pane_config():
test_yaml = """
session_name: test_session
windows:
- window_name: test_1
panes:
- shell_command:
- ls -l
- window_name: test_no_panes
"""
sconfig = load_yaml(test_yaml)
config.validate_schema(sconfig)
assert config.expand(config.trickle(sconfig))['windows'][1]['panes'][0] == {
'shell_command': []
}
def test_expands_blank_panes():
"""Expand blank config into full form.
Handle ``NoneType`` and 'blank'::
# nothing, None, 'blank'
'panes': [
None,
'blank'
]
# should be blank
'panes': [
'shell_command': []
]
Blank strings::
panes: [
''
]
# should output to:
panes:
'shell_command': ['']
"""
yaml_config_file = os.path.join(example_dir, 'blank-panes.yaml')
test_config = load_config(yaml_config_file)
assert config.expand(test_config) == fixtures.expand_blank.expected
def test_no_session_name():
yaml_config = """
- window_name: editor
panes:
shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- window_name: logging
automatic-rename: true
panes:
- shell_command:
- htop
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
    assert excinfo.match(r'requires "session_name"')
def test_no_windows():
yaml_config = """
session_name: test session
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
assert excinfo.match(r'list of "windows"')
def test_no_window_name():
yaml_config = """
session_name: test session
windows:
- window_name: editor
panes:
shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- automatic-rename: true
panes:
- shell_command:
- htop
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
    assert excinfo.match('missing "window_name"')
def test_replaces_env_variables(monkeypatch):
env_key = "TESTHEY92"
env_val = "HEYO1"
yaml_config = """
start_directory: {TEST_VAR}/test
shell_command_before: {TEST_VAR}/test2
before_script: {TEST_VAR}/test3
session_name: hi - {TEST_VAR}
options:
default-command: {TEST_VAR}/lol
global_options:
default-shell: {TEST_VAR}/moo
windows:
- window_name: editor
panes:
- shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- window_name: logging @ {TEST_VAR}
automatic-rename: true
panes:
- shell_command:
- htop
""".format(
TEST_VAR="${%s}" % env_key
)
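    # each {TEST_VAR} placeholder above is now the literal string
    # "${TESTHEY92}", which config.expand() resolves from the environment
    # variable set via monkeypatch below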
sconfig = load_yaml(yaml_config)
monkeypatch.setenv(str(env_key), str(env_val))
sconfig = config.expand(sconfig)
assert "%s/test" % env_val == sconfig['start_directory']
assert "%s/test2" % env_val in sconfig['shell_command_before']
assert "%s/test3" % env_val == sconfig['before_script']
assert "hi - %s" % env_val == sconfig['session_name']
assert "%s/moo" % env_val == sconfig['global_options']['default-shell']
assert "%s/lol" % env_val == sconfig['options']['default-command']
assert "logging @ %s" % env_val == sconfig['windows'][1]['window_name']
|
def latinnum(x):
    """Convert a positive integer to its alphabetic equivalent using
    spreadsheet-style bijective base-26 (1 -> 'A', 26 -> 'Z', 27 -> 'AA')."""
    x = int(x)
    if x <= 0:
        raise ValueError("Value must be greater than zero to produce an equivalent latin string")
    lx = ""
    while x > 0:
        # work in 0-based digits so that 26 maps to 'Z' instead of carrying
        x, rem = divmod(x - 1, 26)
        lx = chr(ord('A') + rem) + lx
    return lx |
(window.webpackJsonp=window.webpackJsonp||[]).push([[2],{2:function(e,t,n){e.exports=n("hN/g")},PDX0:function(e,t){(function(t){e.exports=t}).call(this,{})},"hN/g":function(e,t,n){"use strict";n.r(t);var r,o=n("mrSG");r=function(){!function(e){var t=e.performance;function n(e){t&&t.mark&&t.mark(e)}function r(e,n){t&&t.measure&&t.measure(e,n)}n("Zone");var o=e.__Zone_symbol_prefix||"__zone_symbol__";function a(e){return o+e}var i=!0===e[a("forceDuplicateZoneCheck")];if(e.Zone){if(i||"function"!=typeof e.Zone.__symbol__)throw new Error("Zone already loaded.");return e.Zone}var c=function(){function t(e,t){this._parent=e,this._name=t?t.name||"unnamed":"<root>",this._properties=t&&t.properties||{},this._zoneDelegate=new l(this,this._parent&&this._parent._zoneDelegate,t)}return t.assertZonePatched=function(){if(e.Promise!==Z.ZoneAwarePromise)throw new Error("Zone.js has detected that ZoneAwarePromise `(window|global).Promise` has been overwritten.\nMost likely cause is that a Promise polyfill has been loaded after Zone.js (Polyfilling Promise api is not necessary when zone.js is loaded. If you must load one, do so before loading zone.js.)")},Object.defineProperty(t,"root",{get:function(){for(var e=t.current;e.parent;)e=e.parent;return e},enumerable:!0,configurable:!0}),Object.defineProperty(t,"current",{get:function(){return C.zone},enumerable:!0,configurable:!0}),Object.defineProperty(t,"currentTask",{get:function(){return z},enumerable:!0,configurable:!0}),t.__load_patch=function(o,a){if(Z.hasOwnProperty(o)){if(i)throw Error("Already loaded patch: "+o)}else if(!e["__Zone_disable_"+o]){var c="Zone:"+o;n(c),Z[o]=a(e,t,j),r(c,c)}},Object.defineProperty(t.prototype,"parent",{get:function(){return this._parent},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"name",{get:function(){return this._name},enumerable:!0,configurable:!0}),t.prototype.get=function(e){var t=this.getZoneWith(e);if(t)return t._properties[e]},t.prototype.getZoneWith=function(e){for(var t=this;t;){if(t._properties.hasOwnProperty(e))return t;t=t._parent}return null},t.prototype.fork=function(e){if(!e)throw new Error("ZoneSpec required!");return this._zoneDelegate.fork(this,e)},t.prototype.wrap=function(e,t){if("function"!=typeof e)throw new Error("Expecting function got: "+e);var n=this._zoneDelegate.intercept(this,e,t),r=this;return function(){return r.runGuarded(n,this,arguments,t)}},t.prototype.run=function(e,t,n,r){C={parent:C,zone:this};try{return this._zoneDelegate.invoke(this,e,t,n,r)}finally{C=C.parent}},t.prototype.runGuarded=function(e,t,n,r){void 0===t&&(t=null),C={parent:C,zone:this};try{try{return this._zoneDelegate.invoke(this,e,t,n,r)}catch(o){if(this._zoneDelegate.handleError(this,o))throw o}}finally{C=C.parent}},t.prototype.runTask=function(e,t,n){if(e.zone!=this)throw new Error("A task can only be run in the zone of creation! 
(Creation: "+(e.zone||b).name+"; Execution: "+this.name+")");if(e.state!==m||e.type!==D&&e.type!==P){var r=e.state!=w;r&&e._transitionTo(w,T),e.runCount++;var o=z;z=e,C={parent:C,zone:this};try{e.type==P&&e.data&&!e.data.isPeriodic&&(e.cancelFn=void 0);try{return this._zoneDelegate.invokeTask(this,e,t,n)}catch(a){if(this._zoneDelegate.handleError(this,a))throw a}}finally{e.state!==m&&e.state!==O&&(e.type==D||e.data&&e.data.isPeriodic?r&&e._transitionTo(T,w):(e.runCount=0,this._updateTaskCount(e,-1),r&&e._transitionTo(m,w,m))),C=C.parent,z=o}}},t.prototype.scheduleTask=function(e){if(e.zone&&e.zone!==this)for(var t=this;t;){if(t===e.zone)throw Error("can not reschedule task to "+this.name+" which is descendants of the original zone "+e.zone.name);t=t.parent}e._transitionTo(k,m);var n=[];e._zoneDelegates=n,e._zone=this;try{e=this._zoneDelegate.scheduleTask(this,e)}catch(r){throw e._transitionTo(O,k,m),this._zoneDelegate.handleError(this,r),r}return e._zoneDelegates===n&&this._updateTaskCount(e,1),e.state==k&&e._transitionTo(T,k),e},t.prototype.scheduleMicroTask=function(e,t,n,r){return this.scheduleTask(new f(S,e,t,n,r,void 0))},t.prototype.scheduleMacroTask=function(e,t,n,r,o){return this.scheduleTask(new f(P,e,t,n,r,o))},t.prototype.scheduleEventTask=function(e,t,n,r,o){return this.scheduleTask(new f(D,e,t,n,r,o))},t.prototype.cancelTask=function(e){if(e.zone!=this)throw new Error("A task can only be cancelled in the zone of creation! (Creation: "+(e.zone||b).name+"; Execution: "+this.name+")");e._transitionTo(E,T,w);try{this._zoneDelegate.cancelTask(this,e)}catch(t){throw e._transitionTo(O,E),this._zoneDelegate.handleError(this,t),t}return this._updateTaskCount(e,-1),e._transitionTo(m,E),e.runCount=0,e},t.prototype._updateTaskCount=function(e,t){var n=e._zoneDelegates;-1==t&&(e._zoneDelegates=null);for(var r=0;r<n.length;r++)n[r]._updateTaskCount(e.type,t)},t}();c.__symbol__=a;var s,u={name:"",onHasTask:function(e,t,n,r){return e.hasTask(n,r)},onScheduleTask:function(e,t,n,r){return e.scheduleTask(n,r)},onInvokeTask:function(e,t,n,r,o,a){return e.invokeTask(n,r,o,a)},onCancelTask:function(e,t,n,r){return e.cancelTask(n,r)}},l=function(){function 
e(e,t,n){this._taskCounts={microTask:0,macroTask:0,eventTask:0},this.zone=e,this._parentDelegate=t,this._forkZS=n&&(n&&n.onFork?n:t._forkZS),this._forkDlgt=n&&(n.onFork?t:t._forkDlgt),this._forkCurrZone=n&&(n.onFork?this.zone:t._forkCurrZone),this._interceptZS=n&&(n.onIntercept?n:t._interceptZS),this._interceptDlgt=n&&(n.onIntercept?t:t._interceptDlgt),this._interceptCurrZone=n&&(n.onIntercept?this.zone:t._interceptCurrZone),this._invokeZS=n&&(n.onInvoke?n:t._invokeZS),this._invokeDlgt=n&&(n.onInvoke?t:t._invokeDlgt),this._invokeCurrZone=n&&(n.onInvoke?this.zone:t._invokeCurrZone),this._handleErrorZS=n&&(n.onHandleError?n:t._handleErrorZS),this._handleErrorDlgt=n&&(n.onHandleError?t:t._handleErrorDlgt),this._handleErrorCurrZone=n&&(n.onHandleError?this.zone:t._handleErrorCurrZone),this._scheduleTaskZS=n&&(n.onScheduleTask?n:t._scheduleTaskZS),this._scheduleTaskDlgt=n&&(n.onScheduleTask?t:t._scheduleTaskDlgt),this._scheduleTaskCurrZone=n&&(n.onScheduleTask?this.zone:t._scheduleTaskCurrZone),this._invokeTaskZS=n&&(n.onInvokeTask?n:t._invokeTaskZS),this._invokeTaskDlgt=n&&(n.onInvokeTask?t:t._invokeTaskDlgt),this._invokeTaskCurrZone=n&&(n.onInvokeTask?this.zone:t._invokeTaskCurrZone),this._cancelTaskZS=n&&(n.onCancelTask?n:t._cancelTaskZS),this._cancelTaskDlgt=n&&(n.onCancelTask?t:t._cancelTaskDlgt),this._cancelTaskCurrZone=n&&(n.onCancelTask?this.zone:t._cancelTaskCurrZone),this._hasTaskZS=null,this._hasTaskDlgt=null,this._hasTaskDlgtOwner=null,this._hasTaskCurrZone=null;var r=n&&n.onHasTask;(r||t&&t._hasTaskZS)&&(this._hasTaskZS=r?n:u,this._hasTaskDlgt=t,this._hasTaskDlgtOwner=this,this._hasTaskCurrZone=e,n.onScheduleTask||(this._scheduleTaskZS=u,this._scheduleTaskDlgt=t,this._scheduleTaskCurrZone=this.zone),n.onInvokeTask||(this._invokeTaskZS=u,this._invokeTaskDlgt=t,this._invokeTaskCurrZone=this.zone),n.onCancelTask||(this._cancelTaskZS=u,this._cancelTaskDlgt=t,this._cancelTaskCurrZone=this.zone))}return e.prototype.fork=function(e,t){return this._forkZS?this._forkZS.onFork(this._forkDlgt,this.zone,e,t):new c(e,t)},e.prototype.intercept=function(e,t,n){return this._interceptZS?this._interceptZS.onIntercept(this._interceptDlgt,this._interceptCurrZone,e,t,n):t},e.prototype.invoke=function(e,t,n,r,o){return this._invokeZS?this._invokeZS.onInvoke(this._invokeDlgt,this._invokeCurrZone,e,t,n,r,o):t.apply(n,r)},e.prototype.handleError=function(e,t){return!this._handleErrorZS||this._handleErrorZS.onHandleError(this._handleErrorDlgt,this._handleErrorCurrZone,e,t)},e.prototype.scheduleTask=function(e,t){var n=t;if(this._scheduleTaskZS)this._hasTaskZS&&n._zoneDelegates.push(this._hasTaskDlgtOwner),(n=this._scheduleTaskZS.onScheduleTask(this._scheduleTaskDlgt,this._scheduleTaskCurrZone,e,t))||(n=t);else if(t.scheduleFn)t.scheduleFn(t);else{if(t.type!=S)throw new Error("Task is missing scheduleFn.");g(t)}return n},e.prototype.invokeTask=function(e,t,n,r){return this._invokeTaskZS?this._invokeTaskZS.onInvokeTask(this._invokeTaskDlgt,this._invokeTaskCurrZone,e,t,n,r):t.callback.apply(n,r)},e.prototype.cancelTask=function(e,t){var n;if(this._cancelTaskZS)n=this._cancelTaskZS.onCancelTask(this._cancelTaskDlgt,this._cancelTaskCurrZone,e,t);else{if(!t.cancelFn)throw Error("Task is not cancelable");n=t.cancelFn(t)}return n},e.prototype.hasTask=function(e,t){try{this._hasTaskZS&&this._hasTaskZS.onHasTask(this._hasTaskDlgt,this._hasTaskCurrZone,e,t)}catch(n){this.handleError(e,n)}},e.prototype._updateTaskCount=function(e,t){var n=this._taskCounts,r=n[e],o=n[e]=r+t;if(o<0)throw new Error("More tasks executed 
then were scheduled.");0!=r&&0!=o||this.hasTask(this.zone,{microTask:n.microTask>0,macroTask:n.macroTask>0,eventTask:n.eventTask>0,change:e})},e}(),f=function(){function t(n,r,o,a,i,c){if(this._zone=null,this.runCount=0,this._zoneDelegates=null,this._state="notScheduled",this.type=n,this.source=r,this.data=a,this.scheduleFn=i,this.cancelFn=c,!o)throw new Error("callback is not defined");this.callback=o;var s=this;this.invoke=n===D&&a&&a.useG?t.invokeTask:function(){return t.invokeTask.call(e,s,this,arguments)}}return t.invokeTask=function(e,t,n){e||(e=this),I++;try{return e.runCount++,e.zone.runTask(e,t,n)}finally{1==I&&_(),I--}},Object.defineProperty(t.prototype,"zone",{get:function(){return this._zone},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"state",{get:function(){return this._state},enumerable:!0,configurable:!0}),t.prototype.cancelScheduleRequest=function(){this._transitionTo(m,k)},t.prototype._transitionTo=function(e,t,n){if(this._state!==t&&this._state!==n)throw new Error(this.type+" '"+this.source+"': can not transition to '"+e+"', expecting state '"+t+"'"+(n?" or '"+n+"'":"")+", was '"+this._state+"'.");this._state=e,e==m&&(this._zoneDelegates=null)},t.prototype.toString=function(){return this.data&&void 0!==this.data.handleId?this.data.handleId.toString():Object.prototype.toString.call(this)},t.prototype.toJSON=function(){return{type:this.type,state:this.state,source:this.source,zone:this.zone.name,runCount:this.runCount}},t}(),p=a("setTimeout"),h=a("Promise"),d=a("then"),v=[],y=!1;function g(t){if(0===I&&0===v.length)if(s||e[h]&&(s=e[h].resolve(0)),s){var n=s[d];n||(n=s.then),n.call(s,_)}else e[p](_,0);t&&v.push(t)}function _(){if(!y){for(y=!0;v.length;){var e=v;v=[];for(var t=0;t<e.length;t++){var n=e[t];try{n.zone.runTask(n,null,null)}catch(r){j.onUnhandledError(r)}}}j.microtaskDrainDone(),y=!1}}var b={name:"NO ZONE"},m="notScheduled",k="scheduling",T="scheduled",w="running",E="canceling",O="unknown",S="microTask",P="macroTask",D="eventTask",Z={},j={symbol:a,currentZoneFrame:function(){return C},onUnhandledError:R,microtaskDrainDone:R,scheduleMicroTask:g,showUncaughtError:function(){return!c[a("ignoreConsoleErrorUncaughtError")]},patchEventTarget:function(){return[]},patchOnProperties:R,patchMethod:function(){return R},bindArguments:function(){return[]},patchThen:function(){return R},patchMacroTask:function(){return R},setNativePromise:function(e){e&&"function"==typeof e.resolve&&(s=e.resolve(0))},patchEventPrototype:function(){return R},isIEOrEdge:function(){return!1},getGlobalObjects:function(){},ObjectDefineProperty:function(){return R},ObjectGetOwnPropertyDescriptor:function(){},ObjectCreate:function(){},ArraySlice:function(){return[]},patchClass:function(){return R},wrapWithCurrentZone:function(){return R},filterProperties:function(){return[]},attachOriginToPatched:function(){return R},_redefineProperty:function(){return R},patchCallbacks:function(){return R}},C={parent:null,zone:new c(null,null)},z=null,I=0;function R(){}r("Zone","Zone"),e.Zone=c}("undefined"!=typeof window&&window||"undefined"!=typeof self&&self||global),Zone.__load_patch("ZoneAwarePromise",function(e,t,n){var r=Object.getOwnPropertyDescriptor,a=Object.defineProperty,i=n.symbol,c=[],s=i("Promise"),u=i("then"),l="__creationTrace__";n.onUnhandledError=function(e){if(n.showUncaughtError()){var t=e&&e.rejection;t?console.error("Unhandled Promise rejection:",t instanceof Error?t.message:t,"; Zone:",e.zone.name,"; Task:",e.task&&e.task.source,"; Value:",t,t instanceof 
Error?t.stack:void 0):console.error(e)}},n.microtaskDrainDone=function(){for(;c.length;)for(var e=function(){var e=c.shift();try{e.zone.runGuarded(function(){throw e})}catch(t){p(t)}};c.length;)e()};var f=i("unhandledPromiseRejectionHandler");function p(e){n.onUnhandledError(e);try{var r=t[f];r&&"function"==typeof r&&r.call(this,e)}catch(o){}}function h(e){return e&&e.then}function d(e){return e}function v(e){return R.reject(e)}var y=i("state"),g=i("value"),_=i("finally"),b=i("parentPromiseValue"),m=i("parentPromiseState"),k="Promise.then",T=null,w=!0,E=!1,O=0;function S(e,t){return function(n){try{j(e,t,n)}catch(r){j(e,!1,r)}}}var P=function(){var e=!1;return function(t){return function(){e||(e=!0,t.apply(null,arguments))}}},D="Promise resolved with itself",Z=i("currentTaskTrace");function j(e,r,o){var i,s=P();if(e===o)throw new TypeError(D);if(e[y]===T){var u=null;try{"object"!=typeof o&&"function"!=typeof o||(u=o&&o.then)}catch(v){return s(function(){j(e,!1,v)})(),e}if(r!==E&&o instanceof R&&o.hasOwnProperty(y)&&o.hasOwnProperty(g)&&o[y]!==T)z(o),j(e,o[y],o[g]);else if(r!==E&&"function"==typeof u)try{u.call(o,s(S(e,r)),s(S(e,!1)))}catch(v){s(function(){j(e,!1,v)})()}else{e[y]=r;var f=e[g];if(e[g]=o,e[_]===_&&r===w&&(e[y]=e[m],e[g]=e[b]),r===E&&o instanceof Error){var p=t.currentTask&&t.currentTask.data&&t.currentTask.data[l];p&&a(o,Z,{configurable:!0,enumerable:!1,writable:!0,value:p})}for(var h=0;h<f.length;)I(e,f[h++],f[h++],f[h++],f[h++]);if(0==f.length&&r==E){e[y]=O;try{throw new Error("Uncaught (in promise): "+((i=o)&&i.toString===Object.prototype.toString?(i.constructor&&i.constructor.name||"")+": "+JSON.stringify(i):i?i.toString():Object.prototype.toString.call(i))+(o&&o.stack?"\n"+o.stack:""))}catch(v){var d=v;d.rejection=o,d.promise=e,d.zone=t.current,d.task=t.currentTask,c.push(d),n.scheduleMicroTask()}}}}return e}var C=i("rejectionHandledHandler");function z(e){if(e[y]===O){try{var n=t[C];n&&"function"==typeof n&&n.call(this,{rejection:e[g],promise:e})}catch(o){}e[y]=E;for(var r=0;r<c.length;r++)e===c[r].promise&&c.splice(r,1)}}function I(e,t,n,r,o){z(e);var a=e[y],i=a?"function"==typeof r?r:d:"function"==typeof o?o:v;t.scheduleMicroTask(k,function(){try{var r=e[g],o=!!n&&_===n[_];o&&(n[b]=r,n[m]=a);var c=t.run(i,void 0,o&&i!==v&&i!==d?[]:[r]);j(n,!0,c)}catch(s){j(n,!1,s)}},n)}var R=function(){function e(t){if(!(this instanceof e))throw new Error("Must be an instanceof Promise.");this[y]=T,this[g]=[];try{t&&t(S(this,w),S(this,E))}catch(n){j(this,!1,n)}}return e.toString=function(){return"function ZoneAwarePromise() { [native code] }"},e.resolve=function(e){return j(new this(null),w,e)},e.reject=function(e){return j(new this(null),E,e)},e.race=function(e){var t,n,r,a,i=new this(function(e,t){r=e,a=t});function c(e){r(e)}function s(e){a(e)}try{for(var u=Object(o.__values)(e),l=u.next();!l.done;l=u.next()){var f=l.value;h(f)||(f=this.resolve(f)),f.then(c,s)}}catch(p){t={error:p}}finally{try{l&&!l.done&&(n=u.return)&&n.call(u)}finally{if(t)throw t.error}}return i},e.all=function(t){return e.allWithCallback(t)},e.allSettled=function(t){return(this&&this.prototype instanceof e?this:e).allWithCallback(t,{thenCallback:function(e){return{status:"fulfilled",value:e}},errorCallback:function(e){return{status:"rejected",reason:e}}})},e.allWithCallback=function(e,t){var n,r,a,i,c=new this(function(e,t){a=e,i=t}),s=2,u=0,l=[],f=function(e){h(e)||(e=p.resolve(e));var 
n=u;try{e.then(function(e){l[n]=t?t.thenCallback(e):e,0==--s&&a(l)},function(e){t?(l[n]=t.errorCallback(e),0==--s&&a(l)):i(e)})}catch(r){i(r)}s++,u++},p=this;try{for(var d=Object(o.__values)(e),v=d.next();!v.done;v=d.next())f(v.value)}catch(y){n={error:y}}finally{try{v&&!v.done&&(r=d.return)&&r.call(d)}finally{if(n)throw n.error}}return 0==(s-=2)&&a(l),c},Object.defineProperty(e.prototype,Symbol.toStringTag,{get:function(){return"Promise"},enumerable:!0,configurable:!0}),e.prototype.then=function(e,n){var r=new this.constructor(null),o=t.current;return this[y]==T?this[g].push(o,r,e,n):I(this,o,r,e,n),r},e.prototype.catch=function(e){return this.then(null,e)},e.prototype.finally=function(e){var n=new this.constructor(null);n[_]=_;var r=t.current;return this[y]==T?this[g].push(r,n,e,e):I(this,r,n,e,e),n},e}();R.resolve=R.resolve,R.reject=R.reject,R.race=R.race,R.all=R.all;var M=e[s]=e.Promise,x=t.__symbol__("ZoneAwarePromise"),L=r(e,"Promise");L&&!L.configurable||(L&&delete L.writable,L&&delete L.value,L||(L={configurable:!0,enumerable:!0}),L.get=function(){return e[x]?e[x]:e[s]},L.set=function(t){t===R?e[x]=t:(e[s]=t,t.prototype[u]||A(t),n.setNativePromise(t))},a(e,"Promise",L)),e.Promise=R;var N,F=i("thenPatched");function A(e){var t=e.prototype,n=r(t,"then");if(!n||!1!==n.writable&&n.configurable){var o=t.then;t[u]=o,e.prototype.then=function(e,t){var n=this;return new R(function(e,t){o.call(n,e,t)}).then(e,t)},e[F]=!0}}if(n.patchThen=A,M){A(M);var H=e.fetch;"function"==typeof H&&(e[n.symbol("fetch")]=H,e.fetch=(N=H,function(){var e=N.apply(this,arguments);if(e instanceof R)return e;var t=e.constructor;return t[F]||A(t),e}))}return Promise[t.__symbol__("uncaughtPromiseErrors")]=c,R});var e=Object.getOwnPropertyDescriptor,t=Object.defineProperty,n=Object.getPrototypeOf,r=Object.create,a=Array.prototype.slice,i="addEventListener",c="removeEventListener",s=Zone.__symbol__(i),u=Zone.__symbol__(c),l="true",f="false",p=Zone.__symbol__("");function h(e,t){return Zone.current.wrap(e,t)}function d(e,t,n,r,o){return Zone.current.scheduleMacroTask(e,t,n,r,o)}var v=Zone.__symbol__,y="undefined"!=typeof window,g=y?window:void 0,_=y&&g||"object"==typeof self&&self||global,b="removeAttribute",m=[null];function k(e,t){for(var n=e.length-1;n>=0;n--)"function"==typeof e[n]&&(e[n]=h(e[n],t+"_"+n));return e}function T(e){return!e||!1!==e.writable&&!("function"==typeof e.get&&void 0===e.set)}var w="undefined"!=typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope,E=!("nw"in _)&&void 0!==_.process&&"[object process]"==={}.toString.call(_.process),O=!E&&!w&&!(!y||!g.HTMLElement),S=void 0!==_.process&&"[object process]"==={}.toString.call(_.process)&&!w&&!(!y||!g.HTMLElement),P={},D=function(e){if(e=e||_.event){var t=P[e.type];t||(t=P[e.type]=v("ON_PROPERTY"+e.type));var n,r=this||e.target||_,o=r[t];if(O&&r===g&&"error"===e.type){var a=e;!0===(n=o&&o.call(this,a.message,a.filename,a.lineno,a.colno,a.error))&&e.preventDefault()}else null==(n=o&&o.apply(this,arguments))||n||e.preventDefault();return n}};function Z(n,r,o){var a=e(n,r);if(!a&&o&&e(o,r)&&(a={enumerable:!0,configurable:!0}),a&&a.configurable){var i=v("on"+r+"patched");if(!n.hasOwnProperty(i)||!n[i]){delete a.writable,delete a.value;var c=a.get,s=a.set,u=r.substr(2),l=P[u];l||(l=P[u]=v("ON_PROPERTY"+u)),a.set=function(e){var t=this;t||n!==_||(t=_),t&&(t[l]&&t.removeEventListener(u,D),s&&s.apply(t,m),"function"==typeof e?(t[l]=e,t.addEventListener(u,D,!1)):t[l]=null)},a.get=function(){var e=this;if(e||n!==_||(e=_),!e)return null;var 
t=e[l];if(t)return t;if(c){var o=c&&c.call(this);if(o)return a.set.call(this,o),"function"==typeof e[b]&&e.removeAttribute(r),o}return null},t(n,r,a),n[i]=!0}}}function j(e,t,n){if(t)for(var r=0;r<t.length;r++)Z(e,"on"+t[r],n);else{var o=[];for(var a in e)"on"==a.substr(0,2)&&o.push(a);for(var i=0;i<o.length;i++)Z(e,o[i],n)}}var C=v("originalInstance");function z(e){var n=_[e];if(n){_[v(e)]=n,_[e]=function(){var t=k(arguments,e);switch(t.length){case 0:this[C]=new n;break;case 1:this[C]=new n(t[0]);break;case 2:this[C]=new n(t[0],t[1]);break;case 3:this[C]=new n(t[0],t[1],t[2]);break;case 4:this[C]=new n(t[0],t[1],t[2],t[3]);break;default:throw new Error("Arg list too long.")}},x(_[e],n);var r,o=new n(function(){});for(r in o)"XMLHttpRequest"===e&&"responseBlob"===r||function(n){"function"==typeof o[n]?_[e].prototype[n]=function(){return this[C][n].apply(this[C],arguments)}:t(_[e].prototype,n,{set:function(t){"function"==typeof t?(this[C][n]=h(t,e+"."+n),x(this[C][n],t)):this[C][n]=t},get:function(){return this[C][n]}})}(r);for(r in n)"prototype"!==r&&n.hasOwnProperty(r)&&(_[e][r]=n[r])}}var I=!1;function R(t,r,o){for(var a=t;a&&!a.hasOwnProperty(r);)a=n(a);!a&&t[r]&&(a=t);var i,c,s=v(r),u=null;if(a&&!(u=a[s])&&(u=a[s]=a[r],T(a&&e(a,r)))){var l=o(u,s,r);a[r]=function(){return l(this,arguments)},x(a[r],u),I&&(i=u,c=a[r],"function"==typeof Object.getOwnPropertySymbols&&Object.getOwnPropertySymbols(i).forEach(function(e){var t=Object.getOwnPropertyDescriptor(i,e);Object.defineProperty(c,e,{get:function(){return i[e]},set:function(n){(!t||t.writable&&"function"==typeof t.set)&&(i[e]=n)},enumerable:!t||t.enumerable,configurable:!t||t.configurable})}))}return u}function M(e,t,n){var r=null;function o(e){var t=e.data;return t.args[t.cbIdx]=function(){e.invoke.apply(this,arguments)},r.apply(t.target,t.args),e}r=R(e,t,function(e){return function(t,r){var a=n(t,r);return a.cbIdx>=0&&"function"==typeof r[a.cbIdx]?d(a.name,r[a.cbIdx],a,o):e.apply(t,r)}})}function x(e,t){e[v("OriginalDelegate")]=t}var L=!1,N=!1;function F(){if(L)return N;L=!0;try{var e=g.navigator.userAgent;-1===e.indexOf("MSIE ")&&-1===e.indexOf("Trident/")&&-1===e.indexOf("Edge/")||(N=!0)}catch(t){}return N}Zone.__load_patch("toString",function(e){var t=Function.prototype.toString,n=v("OriginalDelegate"),r=v("Promise"),o=v("Error"),a=function(){if("function"==typeof this){var a=this[n];if(a)return"function"==typeof a?t.call(a):Object.prototype.toString.call(a);if(this===Promise){var i=e[r];if(i)return t.call(i)}if(this===Error){var c=e[o];if(c)return t.call(c)}}return t.call(this)};a[n]=t,Function.prototype.toString=a;var i=Object.prototype.toString;Object.prototype.toString=function(){return this instanceof Promise?"[object Promise]":i.call(this)}});var A=!1;if("undefined"!=typeof window)try{var H=Object.defineProperty({},"passive",{get:function(){A=!0}});window.addEventListener("test",H,H),window.removeEventListener("test",H,H)}catch(Oe){A=!1}var G={useG:!0},B={},W={},q=new RegExp("^"+p+"(\\w+)(true|false)$"),U=v("propagationStopped");function X(e,t,r){var o=r&&r.add||i,a=r&&r.rm||c,s=r&&r.listeners||"eventListeners",u=r&&r.rmAll||"removeAllListeners",h=v(o),d="."+o+":",y="prependListener",g="."+y+":",_=function(e,t,n){if(!e.isRemoved){var r=e.callback;"object"==typeof r&&r.handleEvent&&(e.callback=function(e){return r.handleEvent(e)},e.originalDelegate=r),e.invoke(e,t,[n]);var o=e.options;o&&"object"==typeof o&&o.once&&t[a].call(t,n.type,e.originalDelegate?e.originalDelegate:e.callback,o)}},b=function(t){if(t=t||e.event){var 
n=this||t.target||e,r=n[B[t.type][f]];if(r)if(1===r.length)_(r[0],n,t);else for(var o=r.slice(),a=0;a<o.length&&(!t||!0!==t[U]);a++)_(o[a],n,t)}},m=function(t){if(t=t||e.event){var n=this||t.target||e,r=n[B[t.type][l]];if(r)if(1===r.length)_(r[0],n,t);else for(var o=r.slice(),a=0;a<o.length&&(!t||!0!==t[U]);a++)_(o[a],n,t)}};function k(t,r){if(!t)return!1;var i=!0;r&&void 0!==r.useG&&(i=r.useG);var c=r&&r.vh,_=!0;r&&void 0!==r.chkDup&&(_=r.chkDup);var k=!1;r&&void 0!==r.rt&&(k=r.rt);for(var T=t;T&&!T.hasOwnProperty(o);)T=n(T);if(!T&&t[o]&&(T=t),!T)return!1;if(T[h])return!1;var w,O=r&&r.eventNameToString,S={},P=T[h]=T[o],D=T[v(a)]=T[a],Z=T[v(s)]=T[s],j=T[v(u)]=T[u];function C(e){A||"boolean"==typeof S.options||null==S.options||(e.options=!!S.options.capture,S.options=e.options)}r&&r.prepend&&(w=T[v(r.prepend)]=T[r.prepend]);var z=i?function(e){if(!S.isExisting)return C(e),P.call(S.target,S.eventName,S.capture?m:b,S.options)}:function(e){return C(e),P.call(S.target,S.eventName,e.invoke,S.options)},I=i?function(e){if(!e.isRemoved){var t=B[e.eventName],n=void 0;t&&(n=t[e.capture?l:f]);var r=n&&e.target[n];if(r)for(var o=0;o<r.length;o++)if(r[o]===e){r.splice(o,1),e.isRemoved=!0,0===r.length&&(e.allRemoved=!0,e.target[n]=null);break}}if(e.allRemoved)return D.call(e.target,e.eventName,e.capture?m:b,e.options)}:function(e){return D.call(e.target,e.eventName,e.invoke,e.options)},R=r&&r.diff?r.diff:function(e,t){var n=typeof t;return"function"===n&&e.callback===t||"object"===n&&e.originalDelegate===t},M=Zone[v("BLACK_LISTED_EVENTS")],L=function(t,n,o,a,s,u){return void 0===s&&(s=!1),void 0===u&&(u=!1),function(){var h=this||e,d=arguments[0];r&&r.transferEventName&&(d=r.transferEventName(d));var v=arguments[1];if(!v)return t.apply(this,arguments);if(E&&"uncaughtException"===d)return t.apply(this,arguments);var y=!1;if("function"!=typeof v){if(!v.handleEvent)return t.apply(this,arguments);y=!0}if(!c||c(t,v,h,arguments)){var g,b=arguments[2];if(M)for(var m=0;m<M.length;m++)if(d===M[m])return t.apply(this,arguments);var k=!1;void 0===b?g=!1:!0===b?g=!0:!1===b?g=!1:(g=!!b&&!!b.capture,k=!!b&&!!b.once);var T,w=Zone.current,P=B[d];if(P)T=P[g?l:f];else{var D=(O?O(d):d)+f,Z=(O?O(d):d)+l,j=p+D,C=p+Z;B[d]={},B[d][f]=j,B[d][l]=C,T=g?C:j}var z,I=h[T],x=!1;if(I){if(x=!0,_)for(m=0;m<I.length;m++)if(R(I[m],v))return}else I=h[T]=[];var L=h.constructor.name,N=W[L];N&&(z=N[d]),z||(z=L+n+(O?O(d):d)),S.options=b,k&&(S.options.once=!1),S.target=h,S.capture=g,S.eventName=d,S.isExisting=x;var F=i?G:void 0;F&&(F.taskData=S);var H=w.scheduleEventTask(z,v,F,o,a);return S.target=null,F&&(F.taskData=null),k&&(b.once=!0),(A||"boolean"!=typeof H.options)&&(H.options=b),H.target=h,H.capture=g,H.eventName=d,y&&(H.originalDelegate=v),u?I.unshift(H):I.push(H),s?h:void 0}}};return T[o]=L(P,d,z,I,k),w&&(T[y]=L(w,g,function(e){return w.call(S.target,S.eventName,e.invoke,S.options)},I,k,!0)),T[a]=function(){var t=this||e,n=arguments[0];r&&r.transferEventName&&(n=r.transferEventName(n));var o,a=arguments[2];o=void 0!==a&&(!0===a||!1!==a&&!!a&&!!a.capture);var i=arguments[1];if(!i)return D.apply(this,arguments);if(!c||c(D,i,t,arguments)){var s,u=B[n];u&&(s=u[o?l:f]);var h=s&&t[s];if(h)for(var d=0;d<h.length;d++){var v=h[d];if(R(v,i)){if(h.splice(d,1),v.isRemoved=!0,0===h.length&&(v.allRemoved=!0,t[s]=null,"string"==typeof n)){var y=p+"ON_PROPERTY"+n;t[y]=null}return v.zone.cancelTask(v),k?t:void 0}}return D.apply(this,arguments)}},T[s]=function(){var t=this||e,n=arguments[0];r&&r.transferEventName&&(n=r.transferEventName(n));for(var 
o=[],a=V(t,O?O(n):n),i=0;i<a.length;i++){var c=a[i],s=c.originalDelegate?c.originalDelegate:c.callback;o.push(s)}return o},T[u]=function(){var t=this||e,n=arguments[0];if(n){r&&r.transferEventName&&(n=r.transferEventName(n));var o=B[n];if(o){var i=o[f],c=o[l],s=t[i],p=t[c];if(s){var h=s.slice();for(y=0;y<h.length;y++)this[a].call(this,n,(d=h[y]).originalDelegate?d.originalDelegate:d.callback,d.options)}if(p)for(h=p.slice(),y=0;y<h.length;y++){var d;this[a].call(this,n,(d=h[y]).originalDelegate?d.originalDelegate:d.callback,d.options)}}}else{for(var v=Object.keys(t),y=0;y<v.length;y++){var g=v[y],_=q.exec(g),b=_&&_[1];b&&"removeListener"!==b&&this[u].call(this,b)}this[u].call(this,"removeListener")}if(k)return this},x(T[o],P),x(T[a],D),j&&x(T[u],j),Z&&x(T[s],Z),!0}for(var T=[],w=0;w<t.length;w++)T[w]=k(t[w],r);return T}function V(e,t){var n=[];for(var r in e){var o=q.exec(r),a=o&&o[1];if(a&&(!t||a===t)){var i=e[r];if(i)for(var c=0;c<i.length;c++)n.push(i[c])}}return n}function Y(e,t){var n=e.Event;n&&n.prototype&&t.patchMethod(n.prototype,"stopImmediatePropagation",function(e){return function(t,n){t[U]=!0,e&&e.apply(t,n)}})}function J(e,t,n,r,o){var a=Zone.__symbol__(r);if(!t[a]){var i=t[a]=t[r];t[r]=function(a,c,s){return c&&c.prototype&&o.forEach(function(t){var o=n+"."+r+"::"+t,a=c.prototype;if(a.hasOwnProperty(t)){var i=e.ObjectGetOwnPropertyDescriptor(a,t);i&&i.value?(i.value=e.wrapWithCurrentZone(i.value,o),e._redefineProperty(c.prototype,t,i)):a[t]&&(a[t]=e.wrapWithCurrentZone(a[t],o))}else a[t]&&(a[t]=e.wrapWithCurrentZone(a[t],o))}),i.call(t,a,c,s)},e.attachOriginToPatched(t[r],i)}}var K,Q,$,ee,te,ne=["absolutedeviceorientation","afterinput","afterprint","appinstalled","beforeinstallprompt","beforeprint","beforeunload","devicelight","devicemotion","deviceorientation","deviceorientationabsolute","deviceproximity","hashchange","languagechange","message","mozbeforepaint","offline","online","paint","pageshow","pagehide","popstate","rejectionhandled","storage","unhandledrejection","unload","userproximity","vrdisplyconnected","vrdisplaydisconnected","vrdisplaypresentchange"],re=["encrypted","waitingforkey","msneedkey","mozinterruptbegin","mozinterruptend"],oe=["load"],ae=["blur","error","focus","load","resize","scroll","messageerror"],ie=["bounce","finish","start"],ce=["loadstart","progress","abort","error","load","progress","timeout","loadend","readystatechange"],se=["upgradeneeded","complete","abort","success","error","blocked","versionchange","close"],ue=["close","error","open","message"],le=["error","message"],fe=["abort","animationcancel","animationend","animationiteration","auxclick","beforeinput","blur","cancel","canplay","canplaythrough","change","compositionstart","compositionupdate","compositionend","cuechange","click","close","contextmenu","curechange","dblclick","drag","dragend","dragenter","dragexit","dragleave","dragover","drop","durationchange","emptied","ended","error","focus","focusin","focusout","gotpointercapture","input","invalid","keydown","keypress","keyup","load","loadstart","loadeddata","loadedmetadata","lostpointercapture","mousedown","mouseenter","mouseleave","mousemove","mouseout","mouseover","mouseup","mousewheel","orientationchange","pause","play","playing","pointercancel","pointerdown","pointerenter","pointerleave","pointerlockchange","mozpointerlockchange","webkitpointerlockerchange","pointerlockerror","mozpointerlockerror","webkitpointerlockerror","pointermove","pointout","pointerover","pointerup","progress","ratechange","reset","resize","scroll","seeked","
seeking","select","selectionchange","selectstart","show","sort","stalled","submit","suspend","timeupdate","volumechange","touchcancel","touchmove","touchstart","touchend","transitioncancel","transitionend","waiting","wheel"].concat(["webglcontextrestored","webglcontextlost","webglcontextcreationerror"],["autocomplete","autocompleteerror"],["toggle"],["afterscriptexecute","beforescriptexecute","DOMContentLoaded","freeze","fullscreenchange","mozfullscreenchange","webkitfullscreenchange","msfullscreenchange","fullscreenerror","mozfullscreenerror","webkitfullscreenerror","msfullscreenerror","readystatechange","visibilitychange","resume"],ne,["beforecopy","beforecut","beforepaste","copy","cut","paste","dragstart","loadend","animationstart","search","transitionrun","transitionstart","webkitanimationend","webkitanimationiteration","webkitanimationstart","webkittransitionend"],["activate","afterupdate","ariarequest","beforeactivate","beforedeactivate","beforeeditfocus","beforeupdate","cellchange","controlselect","dataavailable","datasetchanged","datasetcomplete","errorupdate","filterchange","layoutcomplete","losecapture","move","moveend","movestart","propertychange","resizeend","resizestart","rowenter","rowexit","rowsdelete","rowsinserted","command","compassneedscalibration","deactivate","help","mscontentzoom","msmanipulationstatechanged","msgesturechange","msgesturedoubletap","msgestureend","msgesturehold","msgesturestart","msgesturetap","msgotpointercapture","msinertiastart","mslostpointercapture","mspointercancel","mspointerdown","mspointerenter","mspointerhover","mspointerleave","mspointermove","mspointerout","mspointerover","mspointerup","pointerout","mssitemodejumplistitemremoved","msthumbnailclick","stop","storagecommit"]);function pe(e,t,n){if(!n||0===n.length)return t;var r=n.filter(function(t){return t.target===e});if(!r||0===r.length)return t;var o=r[0].ignoreProperties;return t.filter(function(e){return-1===o.indexOf(e)})}function he(e,t,n,r){e&&j(e,pe(e,t,n),r)}function de(e,t){if((!E||S)&&!Zone[e.symbol("patchEvents")]){var r="undefined"!=typeof WebSocket,o=t.__Zone_ignore_on_properties;if(O){var a=window,i=function(){try{var e=g.navigator.userAgent;if(-1!==e.indexOf("MSIE ")||-1!==e.indexOf("Trident/"))return!0}catch(t){}return!1}?[{target:a,ignoreProperties:["error"]}]:[];he(a,fe.concat(["messageerror"]),o?o.concat(i):o,n(a)),he(Document.prototype,fe,o),void 0!==a.SVGElement&&he(a.SVGElement.prototype,fe,o),he(Element.prototype,fe,o),he(HTMLElement.prototype,fe,o),he(HTMLMediaElement.prototype,re,o),he(HTMLFrameSetElement.prototype,ne.concat(ae),o),he(HTMLBodyElement.prototype,ne.concat(ae),o),he(HTMLFrameElement.prototype,oe,o),he(HTMLIFrameElement.prototype,oe,o);var c=a.HTMLMarqueeElement;c&&he(c.prototype,ie,o);var s=a.Worker;s&&he(s.prototype,le,o)}var u=t.XMLHttpRequest;u&&he(u.prototype,ce,o);var l=t.XMLHttpRequestEventTarget;l&&he(l&&l.prototype,ce,o),"undefined"!=typeof IDBIndex&&(he(IDBIndex.prototype,se,o),he(IDBRequest.prototype,se,o),he(IDBOpenDBRequest.prototype,se,o),he(IDBDatabase.prototype,se,o),he(IDBTransaction.prototype,se,o),he(IDBCursor.prototype,se,o)),r&&he(WebSocket.prototype,ue,o)}}function ve(){K=Zone.__symbol__,Q=Object[K("defineProperty")]=Object.defineProperty,$=Object[K("getOwnPropertyDescriptor")]=Object.getOwnPropertyDescriptor,ee=Object.create,te=K("unconfigurables"),Object.defineProperty=function(e,t,n){if(ge(e,t))throw new TypeError("Cannot assign to read only property '"+t+"' of "+e);var 
r=n.configurable;return"prototype"!==t&&(n=_e(e,t,n)),be(e,t,n,r)},Object.defineProperties=function(e,t){return Object.keys(t).forEach(function(n){Object.defineProperty(e,n,t[n])}),e},Object.create=function(e,t){return"object"!=typeof t||Object.isFrozen(t)||Object.keys(t).forEach(function(n){t[n]=_e(e,n,t[n])}),ee(e,t)},Object.getOwnPropertyDescriptor=function(e,t){var n=$(e,t);return n&&ge(e,t)&&(n.configurable=!1),n}}function ye(e,t,n){var r=n.configurable;return be(e,t,n=_e(e,t,n),r)}function ge(e,t){return e&&e[te]&&e[te][t]}function _e(e,t,n){return Object.isFrozen(n)||(n.configurable=!0),n.configurable||(e[te]||Object.isFrozen(e)||Q(e,te,{writable:!0,value:{}}),e[te]&&(e[te][t]=!0)),n}function be(e,t,n,r){try{return Q(e,t,n)}catch(a){if(!n.configurable)throw a;void 0===r?delete n.configurable:n.configurable=r;try{return Q(e,t,n)}catch(a){var o=null;try{o=JSON.stringify(n)}catch(a){o=n.toString()}console.log("Attempting to configure '"+t+"' with descriptor '"+o+"' on object '"+e+"' and got error, giving up: "+a)}}}function me(e,t){var n=t.getGlobalObjects(),r=n.eventNames,o=n.globalSources,a=n.zoneSymbolEventNames,i=n.TRUE_STR,c=n.FALSE_STR,s=n.ZONE_SYMBOL_PREFIX,u="ApplicationCache,EventSource,FileReader,InputMethodContext,MediaController,MessagePort,Node,Performance,SVGElementInstance,SharedWorker,TextTrack,TextTrackCue,TextTrackList,WebKitNamedFlow,Window,Worker,WorkerGlobalScope,XMLHttpRequest,XMLHttpRequestEventTarget,XMLHttpRequestUpload,IDBRequest,IDBOpenDBRequest,IDBDatabase,IDBTransaction,IDBCursor,DBIndex,WebSocket".split(","),l=[],f=e.wtf,p="Anchor,Area,Audio,BR,Base,BaseFont,Body,Button,Canvas,Content,DList,Directory,Div,Embed,FieldSet,Font,Form,Frame,FrameSet,HR,Head,Heading,Html,IFrame,Image,Input,Keygen,LI,Label,Legend,Link,Map,Marquee,Media,Menu,Meta,Meter,Mod,OList,Object,OptGroup,Option,Output,Paragraph,Pre,Progress,Quote,Script,Select,Source,Span,Style,TableCaption,TableCell,TableCol,Table,TableRow,TableSection,TextArea,Title,Track,UList,Unknown,Video".split(",");f?l=p.map(function(e){return"HTML"+e+"Element"}).concat(u):e.EventTarget?l.push("EventTarget"):l=u;for(var h=e.__Zone_disable_IE_check||!1,d=e.__Zone_enable_cross_context_check||!1,v=t.isIEOrEdge(),y="function __BROWSERTOOLS_CONSOLE_SAFEFUNC() { [native code] }",g={MSPointerCancel:"pointercancel",MSPointerDown:"pointerdown",MSPointerEnter:"pointerenter",MSPointerHover:"pointerhover",MSPointerLeave:"pointerleave",MSPointerMove:"pointermove",MSPointerOut:"pointerout",MSPointerOver:"pointerover",MSPointerUp:"pointerup"},_=0;_<r.length;_++){var b=s+((E=r[_])+c),m=s+(E+i);a[E]={},a[E][c]=b,a[E][i]=m}for(_=0;_<p.length;_++)for(var k=p[_],T=o[k]={},w=0;w<r.length;w++){var E;T[E=r[w]]=k+".addEventListener:"+E}var O=[];for(_=0;_<l.length;_++){var S=e[l[_]];O.push(S&&S.prototype)}return t.patchEventTarget(e,O,{vh:function(e,t,n,r){if(!h&&v){if(d)try{var o;if("[object FunctionWrapper]"===(o=t.toString())||o==y)return e.apply(n,r),!1}catch(a){return e.apply(n,r),!1}else if("[object FunctionWrapper]"===(o=t.toString())||o==y)return e.apply(n,r),!1}else if(d)try{t.toString()}catch(a){return e.apply(n,r),!1}return!0},transferEventName:function(e){return g[e]||e}}),Zone[t.symbol("patchEventTarget")]=!!e.EventTarget,!0}function ke(e,t){var n=e.getGlobalObjects();if((!n.isNode||n.isMix)&&!function(e,t){var n=e.getGlobalObjects();if((n.isBrowser||n.isMix)&&!e.ObjectGetOwnPropertyDescriptor(HTMLElement.prototype,"onclick")&&"undefined"!=typeof Element){var 
r=e.ObjectGetOwnPropertyDescriptor(Element.prototype,"onclick");if(r&&!r.configurable)return!1;if(r){e.ObjectDefineProperty(Element.prototype,"onclick",{enumerable:!0,configurable:!0,get:function(){return!0}});var o=!!document.createElement("div").onclick;return e.ObjectDefineProperty(Element.prototype,"onclick",r),o}}var a=t.XMLHttpRequest;if(!a)return!1;var i=a.prototype,c=e.ObjectGetOwnPropertyDescriptor(i,"onreadystatechange");if(c)return e.ObjectDefineProperty(i,"onreadystatechange",{enumerable:!0,configurable:!0,get:function(){return!0}}),o=!!(u=new a).onreadystatechange,e.ObjectDefineProperty(i,"onreadystatechange",c||{}),o;var s=e.symbol("fake");e.ObjectDefineProperty(i,"onreadystatechange",{enumerable:!0,configurable:!0,get:function(){return this[s]},set:function(e){this[s]=e}});var u=new a,l=function(){};return u.onreadystatechange=l,o=u[s]===l,u.onreadystatechange=null,o}(e,t)){var r="undefined"!=typeof WebSocket;!function(e){for(var t=e.getGlobalObjects().eventNames,n=e.symbol("unbound"),r=function(r){var o=t[r],a="on"+o;self.addEventListener(o,function(t){var r,o,i=t.target;for(o=i?i.constructor.name+"."+a:"unknown."+a;i;)i[a]&&!i[a][n]&&((r=e.wrapWithCurrentZone(i[a],o))[n]=i[a],i[a]=r),i=i.parentElement},!0)},o=0;o<t.length;o++)r(o)}(e),e.patchClass("XMLHttpRequest"),r&&function(e,t){var n=e.getGlobalObjects(),r=n.ADD_EVENT_LISTENER_STR,o=n.REMOVE_EVENT_LISTENER_STR,a=t.WebSocket;t.EventTarget||e.patchEventTarget(t,[a.prototype]),t.WebSocket=function(t,n){var i,c,s=arguments.length>1?new a(t,n):new a(t),u=e.ObjectGetOwnPropertyDescriptor(s,"onmessage");return u&&!1===u.configurable?(i=e.ObjectCreate(s),c=s,[r,o,"send","close"].forEach(function(t){i[t]=function(){var n=e.ArraySlice.call(arguments);if(t===r||t===o){var a=n.length>0?n[0]:void 0;if(a){var c=Zone.__symbol__("ON_PROPERTY"+a);s[c]=i[c]}}return s[t].apply(s,n)}})):i=s,e.patchOnProperties(i,["close","error","message","open"],c),i};var i=t.WebSocket;for(var c in a)i[c]=a[c]}(e,t),Zone[e.symbol("patchEvents")]=!0}}Zone.__load_patch("util",function(n,o,s){s.patchOnProperties=j,s.patchMethod=R,s.bindArguments=k,s.patchMacroTask=M;var u=o.__symbol__("BLACK_LISTED_EVENTS"),d=o.__symbol__("UNPATCHED_EVENTS");n[d]&&(n[u]=n[d]),n[u]&&(o[u]=o[d]=n[u]),s.patchEventPrototype=Y,s.patchEventTarget=X,s.isIEOrEdge=F,s.ObjectDefineProperty=t,s.ObjectGetOwnPropertyDescriptor=e,s.ObjectCreate=r,s.ArraySlice=a,s.patchClass=z,s.wrapWithCurrentZone=h,s.filterProperties=pe,s.attachOriginToPatched=x,s._redefineProperty=Object.defineProperty,s.patchCallbacks=J,s.getGlobalObjects=function(){return{globalSources:W,zoneSymbolEventNames:B,eventNames:fe,isBrowser:O,isMix:S,isNode:E,TRUE_STR:l,FALSE_STR:f,ZONE_SYMBOL_PREFIX:p,ADD_EVENT_LISTENER_STR:i,REMOVE_EVENT_LISTENER_STR:c}}}),function(e){e[(e.__Zone_symbol_prefix||"__zone_symbol__")+"legacyPatch"]=function(){var t=e.Zone;t.__load_patch("defineProperty",function(e,t,n){n._redefineProperty=ye,ve()}),t.__load_patch("registerElement",function(e,t,n){!function(e,t){var n=t.getGlobalObjects();(n.isBrowser||n.isMix)&&"registerElement"in e.document&&t.patchCallbacks(t,document,"Document","registerElement",["createdCallback","attachedCallback","detachedCallback","attributeChangedCallback"])}(e,n)}),t.__load_patch("EventTargetLegacy",function(e,t,n){me(e,n),ke(n,e)})}}("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{});var Te=v("zoneTask");function we(e,t,n,r){var o=null,a=null;n+=r;var i={};function c(t){var n=t.data;return 
n.args[0]=function(){try{t.invoke.apply(this,arguments)}finally{t.data&&t.data.isPeriodic||("number"==typeof n.handleId?delete i[n.handleId]:n.handleId&&(n.handleId[Te]=null))}},n.handleId=o.apply(e,n.args),t}function s(e){return a(e.data.handleId)}o=R(e,t+=r,function(n){return function(o,a){if("function"==typeof a[0]){var u=d(t,a[0],{isPeriodic:"Interval"===r,delay:"Timeout"===r||"Interval"===r?a[1]||0:void 0,args:a},c,s);if(!u)return u;var l=u.data.handleId;return"number"==typeof l?i[l]=u:l&&(l[Te]=u),l&&l.ref&&l.unref&&"function"==typeof l.ref&&"function"==typeof l.unref&&(u.ref=l.ref.bind(l),u.unref=l.unref.bind(l)),"number"==typeof l||l?l:u}return n.apply(e,a)}}),a=R(e,n,function(t){return function(n,r){var o,a=r[0];"number"==typeof a?o=i[a]:(o=a&&a[Te])||(o=a),o&&"string"==typeof o.type?"notScheduled"!==o.state&&(o.cancelFn&&o.data.isPeriodic||0===o.runCount)&&("number"==typeof a?delete i[a]:a&&(a[Te]=null),o.zone.cancelTask(o)):t.apply(e,r)}})}function Ee(e,t){if(!Zone[t.symbol("patchEventTarget")]){for(var n=t.getGlobalObjects(),r=n.eventNames,o=n.zoneSymbolEventNames,a=n.TRUE_STR,i=n.FALSE_STR,c=n.ZONE_SYMBOL_PREFIX,s=0;s<r.length;s++){var u=r[s],l=c+(u+i),f=c+(u+a);o[u]={},o[u][i]=l,o[u][a]=f}var p=e.EventTarget;if(p&&p.prototype)return t.patchEventTarget(e,[p&&p.prototype]),!0}}Zone.__load_patch("legacy",function(e){var t=e[Zone.__symbol__("legacyPatch")];t&&t()}),Zone.__load_patch("timers",function(e){we(e,"set","clear","Timeout"),we(e,"set","clear","Interval"),we(e,"set","clear","Immediate")}),Zone.__load_patch("requestAnimationFrame",function(e){we(e,"request","cancel","AnimationFrame"),we(e,"mozRequest","mozCancel","AnimationFrame"),we(e,"webkitRequest","webkitCancel","AnimationFrame")}),Zone.__load_patch("blocking",function(e,t){for(var n=["alert","prompt","confirm"],r=0;r<n.length;r++)R(e,n[r],function(n,r,o){return function(r,a){return t.current.run(n,e,a,o)}})}),Zone.__load_patch("EventTarget",function(e,t,n){!function(e,t){t.patchEventPrototype(e,t)}(e,n),Ee(e,n);var r=e.XMLHttpRequestEventTarget;r&&r.prototype&&n.patchEventTarget(e,[r.prototype]),z("MutationObserver"),z("WebKitMutationObserver"),z("IntersectionObserver"),z("FileReader")}),Zone.__load_patch("on_property",function(e,t,n){de(n,e)}),Zone.__load_patch("customElements",function(e,t,n){!function(e,t){var n=t.getGlobalObjects();(n.isBrowser||n.isMix)&&e.customElements&&"customElements"in e&&t.patchCallbacks(t,e.customElements,"customElements","define",["connectedCallback","disconnectedCallback","adoptedCallback","attributeChangedCallback"])}(e,n)}),Zone.__load_patch("XHR",function(e,t){!function(l){var f=e.XMLHttpRequest;if(f){var p=f.prototype,h=p[s],y=p[u];if(!h){var g=e.XMLHttpRequestEventTarget;if(g){var _=g.prototype;h=_[s],y=_[u]}}var b="readystatechange",m="scheduled",k=R(p,"open",function(){return function(e,t){return e[r]=0==t[2],e[i]=t[1],k.apply(e,t)}}),T=v("fetchTaskAborting"),w=v("fetchTaskScheduling"),E=R(p,"send",function(){return function(e,n){if(!0===t.current[w])return E.apply(e,n);if(e[r])return E.apply(e,n);var o={target:e,url:e[i],isPeriodic:!1,args:n,aborted:!1},a=d("XMLHttpRequest.send",P,o,S,D);e&&!0===e[c]&&!o.aborted&&a.state===m&&a.invoke()}}),O=R(p,"abort",function(){return function(e,r){var o=e[n];if(o&&"string"==typeof o.type){if(null==o.cancelFn||o.data&&o.data.aborted)return;o.zone.cancelTask(o)}else if(!0===t.current[T])return O.apply(e,r)}})}function S(e){var r=e.data,i=r.target;i[a]=!1,i[c]=!1;var l=i[o];h||(h=i[s],y=i[u]),l&&y.call(i,b,l);var 
f=i[o]=function(){if(i.readyState===i.DONE)if(!r.aborted&&i[a]&&e.state===m){var n=i[t.__symbol__("loadfalse")];if(n&&n.length>0){var o=e.invoke;e.invoke=function(){for(var n=i[t.__symbol__("loadfalse")],a=0;a<n.length;a++)n[a]===e&&n.splice(a,1);r.aborted||e.state!==m||o.call(e)},n.push(e)}else e.invoke()}else r.aborted||!1!==i[a]||(i[c]=!0)};return h.call(i,b,f),i[n]||(i[n]=e),E.apply(i,r.args),i[a]=!0,e}function P(){}function D(e){var t=e.data;return t.aborted=!0,O.apply(t.target,t.args)}}();var n=v("xhrTask"),r=v("xhrSync"),o=v("xhrListener"),a=v("xhrScheduled"),i=v("xhrURL"),c=v("xhrErrorBeforeScheduled")}),Zone.__load_patch("geolocation",function(t){t.navigator&&t.navigator.geolocation&&function(t,n){for(var r=t.constructor.name,o=function(o){var a=n[o],i=t[a];if(i){if(!T(e(t,a)))return"continue";t[a]=function(e){var t=function(){return e.apply(this,k(arguments,r+"."+a))};return x(t,e),t}(i)}},a=0;a<n.length;a++)o(a)}(t.navigator.geolocation,["getCurrentPosition","watchPosition"])}),Zone.__load_patch("PromiseRejectionEvent",function(e,t){function n(t){return function(n){V(e,t).forEach(function(r){var o=e.PromiseRejectionEvent;if(o){var a=new o(t,{promise:n.promise,reason:n.rejection});r.invoke(a)}})}}e.PromiseRejectionEvent&&(t[v("unhandledPromiseRejectionHandler")]=n("unhandledrejection"),t[v("rejectionHandledHandler")]=n("rejectionhandled"))})},"function"==typeof define&&n("PDX0")?define(r):r()},mrSG:function(e,t,n){"use strict";n.r(t),n.d(t,"__extends",function(){return o}),n.d(t,"__assign",function(){return a}),n.d(t,"__rest",function(){return i}),n.d(t,"__decorate",function(){return c}),n.d(t,"__param",function(){return s}),n.d(t,"__metadata",function(){return u}),n.d(t,"__awaiter",function(){return l}),n.d(t,"__generator",function(){return f}),n.d(t,"__exportStar",function(){return p}),n.d(t,"__values",function(){return h}),n.d(t,"__read",function(){return d}),n.d(t,"__spread",function(){return v}),n.d(t,"__spreadArrays",function(){return y}),n.d(t,"__await",function(){return g}),n.d(t,"__asyncGenerator",function(){return _}),n.d(t,"__asyncDelegator",function(){return b}),n.d(t,"__asyncValues",function(){return m}),n.d(t,"__makeTemplateObject",function(){return k}),n.d(t,"__importStar",function(){return T}),n.d(t,"__importDefault",function(){return w});var r=function(e,t){return(r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n])})(e,t)};function o(e,t){function n(){this.constructor=e}r(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n)}var a=function(){return(a=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)};function i(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var o=0;for(r=Object.getOwnPropertySymbols(e);o<r.length;o++)t.indexOf(r[o])<0&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]])}return n}function c(e,t,n,r){var o,a=arguments.length,i=a<3?t:null===r?r=Object.getOwnPropertyDescriptor(t,n):r;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)i=Reflect.decorate(e,t,n,r);else for(var c=e.length-1;c>=0;c--)(o=e[c])&&(i=(a<3?o(i):a>3?o(t,n,i):o(t,n))||i);return a>3&&i&&Object.defineProperty(t,n,i),i}function s(e,t){return function(n,r){t(n,r,e)}}function 
u(e,t){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(e,t)}function l(e,t,n,r){return new(n||(n=Promise))(function(o,a){function i(e){try{s(r.next(e))}catch(t){a(t)}}function c(e){try{s(r.throw(e))}catch(t){a(t)}}function s(e){e.done?o(e.value):new n(function(t){t(e.value)}).then(i,c)}s((r=r.apply(e,t||[])).next())})}function f(e,t){var n,r,o,a,i={label:0,sent:function(){if(1&o[0])throw o[1];return o[1]},trys:[],ops:[]};return a={next:c(0),throw:c(1),return:c(2)},"function"==typeof Symbol&&(a[Symbol.iterator]=function(){return this}),a;function c(a){return function(c){return function(a){if(n)throw new TypeError("Generator is already executing.");for(;i;)try{if(n=1,r&&(o=2&a[0]?r.return:a[0]?r.throw||((o=r.return)&&o.call(r),0):r.next)&&!(o=o.call(r,a[1])).done)return o;switch(r=0,o&&(a=[2&a[0],o.value]),a[0]){case 0:case 1:o=a;break;case 4:return i.label++,{value:a[1],done:!1};case 5:i.label++,r=a[1],a=[0];continue;case 7:a=i.ops.pop(),i.trys.pop();continue;default:if(!(o=(o=i.trys).length>0&&o[o.length-1])&&(6===a[0]||2===a[0])){i=0;continue}if(3===a[0]&&(!o||a[1]>o[0]&&a[1]<o[3])){i.label=a[1];break}if(6===a[0]&&i.label<o[1]){i.label=o[1],o=a;break}if(o&&i.label<o[2]){i.label=o[2],i.ops.push(a);break}o[2]&&i.ops.pop(),i.trys.pop();continue}a=t.call(e,i)}catch(c){a=[6,c],r=0}finally{n=o=0}if(5&a[0])throw a[1];return{value:a[0]?a[1]:void 0,done:!0}}([a,c])}}}function p(e,t){for(var n in e)t.hasOwnProperty(n)||(t[n]=e[n])}function h(e){var t="function"==typeof Symbol&&e[Symbol.iterator],n=0;return t?t.call(e):{next:function(){return e&&n>=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}}}function d(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var r,o,a=n.call(e),i=[];try{for(;(void 0===t||t-- >0)&&!(r=a.next()).done;)i.push(r.value)}catch(c){o={error:c}}finally{try{r&&!r.done&&(n=a.return)&&n.call(a)}finally{if(o)throw o.error}}return i}function v(){for(var e=[],t=0;t<arguments.length;t++)e=e.concat(d(arguments[t]));return e}function y(){for(var e=0,t=0,n=arguments.length;t<n;t++)e+=arguments[t].length;var r=Array(e),o=0;for(t=0;t<n;t++)for(var a=arguments[t],i=0,c=a.length;i<c;i++,o++)r[o]=a[i];return r}function g(e){return this instanceof g?(this.v=e,this):new g(e)}function _(e,t,n){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var r,o=n.apply(e,t||[]),a=[];return r={},i("next"),i("throw"),i("return"),r[Symbol.asyncIterator]=function(){return this},r;function i(e){o[e]&&(r[e]=function(t){return new Promise(function(n,r){a.push([e,t,n,r])>1||c(e,t)})})}function c(e,t){try{(n=o[e](t)).value instanceof g?Promise.resolve(n.value.v).then(s,u):l(a[0][2],n)}catch(r){l(a[0][3],r)}var n}function s(e){c("next",e)}function u(e){c("throw",e)}function l(e,t){e(t),a.shift(),a.length&&c(a[0][0],a[0][1])}}function b(e){var t,n;return t={},r("next"),r("throw",function(e){throw e}),r("return"),t[Symbol.iterator]=function(){return this},t;function r(r,o){t[r]=e[r]?function(t){return(n=!n)?{value:g(e[r](t)),done:"return"===r}:o?o(t):t}:o}}function m(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t,n=e[Symbol.asyncIterator];return n?n.call(e):(e=h(e),t={},r("next"),r("throw"),r("return"),t[Symbol.asyncIterator]=function(){return this},t);function r(n){t[n]=e[n]&&function(t){return new Promise(function(r,o){!function(e,t,n,r){Promise.resolve(r).then(function(t){e({value:t,done:n})},t)}(r,o,(t=e[n](t)).done,t.value)})}}}function k(e,t){return 
Object.defineProperty?Object.defineProperty(e,"raw",{value:t}):e.raw=t,e}function T(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function w(e){return e&&e.__esModule?e:{default:e}}}},[[2,0]]]); |
const binary_search = require('./binary_search')
const binary_search_recursive = require('./binary_search_recursive')
// Number of test rounds
let testCnt = 100
// Maximum array length
const MAX_ARR_LENGTH = 50
// Maximum element value
const MAX_ARR_ITEM = 9999
/**
 * Generates a sorted random array with no duplicate elements
* @return {number[]}
*/
const generateDistinctRandArr = () => {
let rands = [], rand
return new Array(
Math.ceil(Math.random() * MAX_ARR_LENGTH)
)
.fill(0)
.map(() => {
while (~rands.indexOf(rand = Math.random() * MAX_ARR_ITEM)) {
}
rands.push(rand)
return rand
})
.sort((a, b) => {
return a - b
})
}
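
// A minimal reference sketch (illustration only, not the module under test):
// the tests below assume ./binary_search and ./binary_search_recursive take a
// sorted array of distinct numbers plus a target value and return that value's
// index in the array.
const binary_search_reference = (arr, target) => {
  let lo = 0
  let hi = arr.length - 1
  while (lo <= hi) {
    const mid = lo + Math.floor((hi - lo) / 2)
    if (arr[mid] === target) return mid
    if (arr[mid] < target) lo = mid + 1
    else hi = mid - 1
  }
  return -1
}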
while (testCnt-- > 0) {
  // Array under test
let arr = generateDistinctRandArr()
  // Index of the value to search for
let targetIndex = Math.floor(Math.random() * arr.length)
  // Search result
let rs
  // Test the iterative version
try {
if ((rs = binary_search(arr, arr[targetIndex])) !== targetIndex) {
console.error(arr, targetIndex, 'binary_search fail!')
}
}
catch (e) {
console.error(arr, targetIndex, 'binary_search ERROR!')
}
  // Test the recursive version
try {
if ((rs = binary_search_recursive(arr, arr[targetIndex])) !== targetIndex) {
console.error(arr, targetIndex, 'binary_search_recursive fail!')
}
}
catch (e) {
console.error(arr, targetIndex, 'binary_search_recursive ERROR!')
}
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "MMUIViewController.h"
#import "EmoticonBoughtListMgrExt.h"
#import "EmoticonStoreCellDelegate.h"
#import "EmoticonStoreMgrExt.h"
#import "EmotionCollectionFooterViewDelegate.h"
#import "LoopPageScrollViewDataSourceDelegate.h"
#import "UICollectionViewDataSource.h"
#import "UICollectionViewDelegate.h"
#import "UICollectionViewDelegateFlowLayout.h"
@class EmotionCollectionFooterView, LoopPageScrollView, MMEmotionStoreNewSearchController, MMTimer, NSArray, NSMutableArray, NSString, UICollectionView, UICollectionViewFlowLayout, UIView;
@interface MMWechatEmotionsViewController : MMUIViewController <UICollectionViewDataSource, UICollectionViewDelegate, UICollectionViewDelegateFlowLayout, EmoticonStoreMgrExt, EmoticonBoughtListMgrExt, LoopPageScrollViewDataSourceDelegate, EmoticonStoreCellDelegate, EmotionCollectionFooterViewDelegate>
{
UICollectionView *_collectionView;
UICollectionViewFlowLayout *_layout;
LoopPageScrollView *_bannerView;
UIView *m_collectionHeaderView;
EmotionCollectionFooterView *_footerView;
MMTimer *m_timer;
NSArray *m_sectionInfoArray;
NSMutableArray *_storeItemArray;
NSMutableArray *_storeAdsArray;
NSMutableArray *m_cellSetList;
unsigned int _topHotNum;
unsigned int m_recentHotNum;
unsigned int _scene;
unsigned int _reqType;
_Bool m_hasGetFirstResponse;
MMUIViewController *m_contentsViewController;
MMEmotionStoreNewSearchController *m_searchController;
_Bool m_isEmoticonStoreExpt;
}
- (void).cxx_destruct;
- (void)MMRefreshCollectionFooterDidTriggerRefresh:(id)arg1;
- (void)OnRestoreEmoticonItemSuccess;
- (void)OnShowAlertWithIKnow:(id)arg1;
- (void)OnShowAlertWithOK:(id)arg1;
- (void)OnStartLoadingAndShowString:(id)arg1;
- (void)OnStartLoadingAndShowStringBlocked:(id)arg1;
- (void)OnStopLoading;
- (void)OnStopLoadingAndShowError:(id)arg1;
- (void)OnStopLoadingAndShowOK:(id)arg1;
- (void)OnStoreListChanged:(id)arg1 withRet:(int)arg2 withReqType:(unsigned int)arg3 Response:(id)arg4;
- (void)adjustContentInsetAndSearchBarMask;
- (void)changeBannerToNextPage;
- (void)clearSearchDisplayController;
- (id)collectionView:(id)arg1 cellForItemAtIndexPath:(id)arg2;
- (void)collectionView:(id)arg1 didSelectItemAtIndexPath:(id)arg2;
- (struct CGSize)collectionView:(id)arg1 layout:(id)arg2 referenceSizeForFooterInSection:(long long)arg3;
- (struct CGSize)collectionView:(id)arg1 layout:(id)arg2 referenceSizeForHeaderInSection:(long long)arg3;
- (struct CGSize)collectionView:(id)arg1 layout:(id)arg2 sizeForItemAtIndexPath:(id)arg3;
- (long long)collectionView:(id)arg1 numberOfItemsInSection:(long long)arg2;
- (id)collectionView:(id)arg1 viewForSupplementaryElementOfKind:(id)arg2 atIndexPath:(id)arg3;
- (void)configBanner;
- (void)configCollectionView;
- (void)configData;
- (void)configHeaderView;
- (void)configSectionInfos;
- (void)configTailView;
- (void)configViews;
- (void)dealloc;
- (void)didChangeToPage:(unsigned int)arg1;
- (void)didDismissSearchController:(id)arg1;
- (void)didTapPageAtNum:(unsigned int)arg1;
- (id)init;
- (void)initBannerPageTimer;
- (id)initWithEmotionScene:(unsigned int)arg1 AndParentViewController:(id)arg2;
- (id)itemForSectionType:(unsigned long long)arg1 rowIndex:(unsigned long long)arg2;
- (long long)numberOfSectionsInCollectionView:(id)arg1;
- (void)openPageForBannerSet:(id)arg1 IsFromBanner:(_Bool)arg2;
- (void)reloadAdsFromMgr;
- (void)reloadData;
- (void)scrollViewDidScroll:(id)arg1;
- (void)scrollViewWillEndDragging:(id)arg1 withVelocity:(struct CGPoint)arg2 targetContentOffset:(inout struct CGPoint *)arg3;
- (_Bool)shouldInteractivePop;
- (void)stopBannerPageTimer;
- (int)totalNumOfPage;
- (void)tryInitViewOfSearchController;
- (void)viewDidAppear:(_Bool)arg1;
- (void)viewDidDisappear:(_Bool)arg1;
- (void)viewDidLayoutSubviews;
- (void)viewDidLoad;
- (id)viewForPage:(id)arg1 pageNum:(unsigned int)arg2;
- (void)viewWillDisappear:(_Bool)arg1;
- (void)willPresentSearchController:(id)arg1;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
|
const http = require('http');
const port = 8080;
const startServer = () => {
http.createServer(function (req, res) {
res.writeHead(200, {'Content-Type': 'text/plain'});
res.end('Hello World!');
}).listen(port);
}
module.exports = {
start: startServer,
port
}
|
import React from "react";
import { Modal, Button } from "react-bootstrap";
class EndGame extends React.Component {
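  // The YES button below calls this.endGame(), which the original class never
  // defined. Minimal sketch of a fix: delegate to an onEndGame callback prop
  // (hypothetical name) supplied by the parent, if one was provided.
  endGame() {
    if (this.props.onEndGame) {
      this.props.onEndGame();
    }
  }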
render() {
return (
<div>
{" "}
<Modal.Header closeButton className="rules-header">
<Modal.Title id="rules-dashboard-title" className="rules-header">
End Game
</Modal.Title>
</Modal.Header>
<Modal.Body className="rules-text">
<p className="rules-text">
Are you sure that you want to end the game? This will end the game for all players.
</p>
<Button
variant="outline-danger"
className="outlineWhite-Dashboard"
onClick={() => this.endGame()}
>
YES
</Button>
</Modal.Body>
</div>
);
}
}
export default EndGame;
|
"""
Anonymizes and uploads DNS and flow data to cloud.
"""
import time
import datetime
import threading
import utils
import requests
import json
import server_config
from host_state import HostState
UPLOAD_INTERVAL = 5
class DataUploader(object):
def __init__(self, host_state):
assert isinstance(host_state, HostState)
self._host_state = host_state
self._lock = threading.Lock()
self._active = True
self._thread = threading.Thread(target=self._upload_thread)
self._thread.daemon = True
self._last_upload_ts = time.time()
def _upload_thread(self):
# Wait till UI is ready
while True:
time.sleep(1)
with self._host_state.lock:
if self._host_state.ui_is_ready:
break
# Loop until initialized
while True:
if utils.safe_run(self._upload_initialization):
break
self._update_ui_status(
'Please sign the consent form in the browser window.'
)
time.sleep(2)
with self._host_state.lock:
self._host_state.has_consent = True
self._update_ui_status(
'Continuously analyzing your network.\n'
)
# Continuously upload data
while True:
time.sleep(UPLOAD_INTERVAL)
with self._lock:
if not self._active:
return
utils.safe_run(self._upload_data)
def _upload_initialization(self):
if not self._check_consent_form():
return False
return self._update_utc_offset()
def _update_utc_offset(self):
ts = time.time()
utc_offset = int(
(datetime.datetime.fromtimestamp(ts) -
datetime.datetime.utcfromtimestamp(ts)).total_seconds()
)
utc_offset_url = server_config.UTC_OFFSET_URL.format(
user_key=self._host_state.user_key,
offset_seconds=utc_offset
)
utils.log('[DATA] Update UTC offset:', utc_offset_url)
status = requests.get(utc_offset_url).text.strip()
utils.log('[DATA] Update UTC offset status:', status)
return 'SUCCESS' == status
def _check_consent_form(self):
check_consent_url = server_config.CHECK_CONSENT_URL.format(
user_key=self._host_state.user_key
)
utils.log('[DATA] Check consent:', check_consent_url)
status = requests.get(check_consent_url).text.strip()
utils.log('[DATA] Check consent status:', status)
return 'True' == status
def _prepare_upload_data(self):
# Remove all pending tasks
with self._host_state.lock:
dns_responses = self._host_state.pending_dns_responses
pkts = self._host_state.pending_pkts
ua_list = list(self._host_state.ua_set)
self._host_state.pending_dns_responses = []
self._host_state.pending_pkts = []
self._host_state.ua_set = set()
# Aggregate all DNS responses. Build a mapping of domain -> ip_list.
dns_dict = {}
for record in dns_responses:
ip_set = dns_dict.setdefault(record['domain'], set())
dns_dict[record['domain']] = ip_set | record['ip_set']
for domain in dns_dict:
dns_dict[domain] = list(dns_dict[domain])
# Aggregate all pkts into flows. Maps (device_id, device_oui,
# device_ip) -> (remote_ip, remote_port, direction, protocol) ->
# length.
flow_dict = {}
byte_count = 0
for pkt in pkts:
device_mac = pkt['device_mac']
device_oui = device_mac.replace(':', '').lower()[0:6]
device_id = utils.get_device_id(device_mac, self._host_state)
if device_id not in self._host_state.device_whitelist:
continue
device_key = json.dumps((device_id, device_oui, pkt['device_ip']))
device_flow_dict = flow_dict.setdefault(device_key, {})
flow_key = json.dumps((
pkt['remote_ip'], pkt['remote_port'],
pkt['direction'], pkt['protocol']
))
device_flow_dict.setdefault(flow_key, 0)
device_flow_dict[flow_key] += pkt['length']
byte_count += pkt['length']
# Collect arp_cache
ip_mac_dict = self._host_state.get_ip_mac_dict_copy()
arp_cache = []
        for (ip, mac) in ip_mac_dict.items():
arp_cache.append({
'device_ip': ip,
'device_id': utils.get_device_id(mac, self._host_state),
'device_oui': mac.replace(':', '').lower()[0:6]
})
# Turn device_mac into device_id in ua_list
ua_list = [
(utils.get_device_id(mac, self._host_state), ua)
for (mac, ua) in ua_list
]
return (dns_dict, flow_dict, byte_count, arp_cache, ua_list)
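    # Illustration only (hypothetical values): for a single device sending one
    # TLS flow, _prepare_upload_data() returns structures shaped roughly like
    #   dns_dict  = {'example.com': ['93.184.216.34']}
    #   flow_dict = {'["<device_id>", "a1b2c3", "10.0.0.5"]':
    #                    {'["93.184.216.34", 443, "<direction>", "<protocol>"]': 4096}}
    # together with the total byte_count, the arp_cache list and the
    # (device_id, user_agent) ua_list.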
def _upload_data(self):
(dns_dict, flow_dict, byte_count, arp_cache, ua_list) = \
self._prepare_upload_data()
delta_sec = time.time() - self._last_upload_ts
# Prepare POST
user_key = self._host_state.user_key
url = server_config.SUBMIT_URL.format(user_key=user_key)
post_data = {
'dns': json.dumps(dns_dict),
'flows': json.dumps(flow_dict),
'arp_cache': json.dumps(arp_cache),
'ua_list': json.dumps(ua_list),
'client_version': self._host_state.client_version,
'duration': str(delta_sec)
}
# Try uploading across 5 attempts
for attempt in range(5):
status_text = 'Uploading data to cloud...\n'
if attempt > 0:
status_text += ' (Attempt {} of 5)'.format(attempt + 1)
self._update_ui_status(status_text)
utils.log('[UPLOAD]', status_text)
response = requests.post(url, data=post_data).text
utils.log('[UPLOAD] Gets back server response:', response)
# Update whitelist
try:
response_dict = json.loads(response)
if response_dict['status'] == 'SUCCESS':
self._last_upload_ts = time.time()
with self._host_state.lock:
self._host_state.device_whitelist = \
response_dict['whitelist']
break
except Exception:
pass
time.sleep((attempt + 1) ** 2)
# Report stats to UI
self._update_ui_status(
'Currently analyzing ' +
'{:,}'.format(int(byte_count / 1000.0 / delta_sec)) +
' KB/s of traffic\nacross ' +
'{}'.format(len(flow_dict)) +
' active devices on your local network.\n'
)
utils.log('[UPLOAD] DNS:', ' '.join(dns_dict.keys()))
utils.log(
'[UPLOAD] Total packets in past epoch:',
self._host_state.packet_count
)
with self._host_state.lock:
self._host_state.packet_count = 0
def _update_ui_status(self, value):
utils.log('[DATA] Update UI:', value)
with self._host_state.lock:
if self._host_state.status_text:
self._host_state.status_text.set(value)
def start(self):
with self._lock:
self._active = True
self._thread.start()
utils.log('[Data] Start uploading data.')
def stop(self):
utils.log('[Data] Stopping.')
with self._lock:
self._active = False
self._thread.join()
utils.log('[Data] Stopped.')
|
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
class RelativeMultiHeadAttention(tf.keras.layers.Layer if tf else object):
"""A RelativeMultiHeadAttention layer as described in [3].
Uses segment level recurrence with state reuse.
"""
def __init__(self,
out_dim,
num_heads,
head_dim,
rel_pos_encoder,
input_layernorm=False,
output_activation=None,
**kwargs):
"""Initializes a RelativeMultiHeadAttention keras Layer object.
Args:
            out_dim (int): The output dimension of this layer (size of the
                final Dense projection).
num_heads (int): The number of attention heads to use.
Denoted `H` in [2].
head_dim (int): The dimension of a single(!) attention head
Denoted `D` in [2].
            rel_pos_encoder (tf.Tensor): The relative positional encoding
                tensor; it is projected and added to the attention scores
                inside `call()`.
input_layernorm (bool): Whether to prepend a LayerNorm before
everything else. Should be True for building a GTrXL.
output_activation (Optional[tf.nn.activation]): Optional tf.nn
activation function. Should be relu for GTrXL.
            **kwargs: Forwarded to the base tf.keras.layers.Layer constructor.
"""
super().__init__(**kwargs)
# No bias or non-linearity.
self._num_heads = num_heads
self._head_dim = head_dim
# 3=Query, key, and value inputs.
self._qkv_layer = tf.keras.layers.Dense(
3 * num_heads * head_dim, use_bias=False)
self._linear_layer = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dense(
out_dim, use_bias=False, activation=output_activation))
self._uvar = self.add_weight(shape=(num_heads, head_dim))
self._vvar = self.add_weight(shape=(num_heads, head_dim))
self._pos_proj = tf.keras.layers.Dense(
num_heads * head_dim, use_bias=False)
self._rel_pos_encoder = rel_pos_encoder
self._input_layernorm = None
if input_layernorm:
self._input_layernorm = tf.keras.layers.LayerNormalization(axis=-1)
def call(self, inputs, memory=None):
T = tf.shape(inputs)[1] # length of segment (time)
H = self._num_heads # number of attention heads
d = self._head_dim # attention head dimension
# Add previous memory chunk (as const, w/o gradient) to input.
# Tau (number of (prev) time slices in each memory chunk).
Tau = memory.shape.as_list()[1] if memory is not None else 0
if memory is not None:
inputs = tf.concat((tf.stop_gradient(memory), inputs), axis=1)
# Apply the Layer-Norm.
if self._input_layernorm is not None:
inputs = self._input_layernorm(inputs)
qkv = self._qkv_layer(inputs)
queries, keys, values = tf.split(qkv, 3, -1)
# Cut out Tau memory timesteps from query.
queries = queries[:, -T:]
queries = tf.reshape(queries, [-1, T, H, d])
keys = tf.reshape(keys, [-1, T + Tau, H, d])
values = tf.reshape(values, [-1, T + Tau, H, d])
R = self._pos_proj(self._rel_pos_encoder)
R = tf.reshape(R, [T + Tau, H, d])
# b=batch
# i and j=time indices (i=max-timesteps (inputs); j=Tau memory space)
# h=head
# d=head-dim (over which we will reduce-sum)
score = tf.einsum("bihd,bjhd->bijh", queries + self._uvar, keys)
pos_score = tf.einsum("bihd,jhd->bijh", queries + self._vvar, R)
score = score + self.rel_shift(pos_score)
score = score / d**0.5
# causal mask of the same length as the sequence
mask = tf.sequence_mask(
tf.range(Tau + 1, T + Tau + 1), dtype=score.dtype)
mask = mask[None, :, :, None]
masked_score = score * mask + 1e30 * (mask - 1.)
wmat = tf.nn.softmax(masked_score, axis=2)
out = tf.einsum("bijh,bjhd->bihd", wmat, values)
out = tf.reshape(out, tf.concat((tf.shape(out)[:2], [H * d]), axis=0))
return self._linear_layer(out)
@staticmethod
def rel_shift(x):
# Transposed version of the shift approach described in [3].
# https://github.com/kimiyoung/transformer-xl/blob/
# 44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L31
x_size = tf.shape(x)
x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]])
x = tf.reshape(x, [x_size[0], x_size[2] + 1, x_size[1], x_size[3]])
x = x[:, 1:, :, :]
x = tf.reshape(x, x_size)
return x
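
# A minimal smoke-test sketch (not part of the RLlib source; the toy sizes are
# illustrative). It exercises call() with a memory chunk, mirroring the
# reshapes above: inputs [B, T, dim], memory [B, Tau, dim], and a relative
# positional encoding with one row per attended timestep (T + Tau).
if __name__ == "__main__" and tf is not None:
    B, T, Tau, dim = 2, 5, 3, 16
    num_heads, head_dim = 4, 8
    rel_pos_encoder = tf.random.normal([T + Tau, dim])
    attn = RelativeMultiHeadAttention(
        out_dim=dim,
        num_heads=num_heads,
        head_dim=head_dim,
        rel_pos_encoder=rel_pos_encoder,
        input_layernorm=True,
        output_activation=tf.nn.relu)
    out = attn(
        tf.random.normal([B, T, dim]), memory=tf.random.normal([B, Tau, dim]))
    print(out.shape)  # Expected: (2, 5, 16) == (B, T, out_dim)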
|
import React from "react";
import Tile from "../Tile.js";
import withNeon from "../../src/index.js";
import ParticlesToon from "../../src/effects/ParticlesToon.js";
class ParticlesTile extends React.Component {
render(){
return (
<Tile bgIm="https://source.unsplash.com/random?a" />
)
}
};
const particlesConfig = {
type: "particles",
mouseMoveCount: 4,
clickMoveCount: 4,
color: ["0", "100%", "100%"]
};
const effect = new ParticlesToon(particlesConfig);
export default withNeon(ParticlesTile, effect);
|
# coding=utf-8
import logging
import re
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Jira(AtlassianRestAPI):
def reindex_status(self):
return self.get('rest/api/2/reindex')
def reindex(self, comments=True, change_history=True, worklogs=True):
"""
Reindex the Jira instance
        Kicks off a reindex. Admin permissions are required to perform this reindex.
:param comments: Indicates that comments should also be reindexed. Not relevant for foreground reindex,
where comments are always reindexed.
:param change_history: Indicates that changeHistory should also be reindexed.
Not relevant for foreground reindex, where changeHistory is always reindexed.
        :param worklogs: Indicates that worklogs should also be reindexed.
            Not relevant for foreground reindex, where worklogs are always reindexed.
:return:
"""
params = {}
if not comments:
params['indexComments'] = comments
if not change_history:
params['indexChangeHistory'] = change_history
if not worklogs:
params['indexWorklogs'] = worklogs
return self.post('rest/api/2/reindex', params=params)
def reindex_with_type(self, indexing_type="BACKGROUND_PREFERRED"):
"""
Reindex the Jira instance
Type of re-indexing available:
FOREGROUND - runs a lock/full reindexing
BACKGROUND - runs a background reindexing.
If Jira fails to finish the background reindexing, respond with 409 Conflict (error message).
BACKGROUND_PREFERRED - If possible do a background reindexing.
If it's not possible (due to an inconsistent index), do a foreground reindexing.
        :param indexing_type: OPTIONAL: The default value for the type is BACKGROUND_PREFERRED
:return:
"""
return self.post('rest/api/2/reindex?type={}'.format(indexing_type))
def reindex_project(self, project_key):
return self.post('secure/admin/IndexProject.jspa', data='confirmed=true&key={}'.format(project_key),
headers=self.form_token_headers)
def reindex_issue(self, list_of_):
pass
def jql(self, jql, fields='*all', start=0, limit=None, expand=None):
"""
Get issues from jql search result with all related fields
:param jql:
:param fields: list of fields, for example: ['priority', 'summary', 'customfield_10007']
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of issues to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:param expand: OPTIONAL: expand the search result
:return:
"""
params = {}
if start is not None:
params['startAt'] = int(start)
if limit is not None:
params['maxResults'] = int(limit)
if fields is not None:
if isinstance(fields, (list, tuple, set)):
fields = ','.join(fields)
params['fields'] = fields
if jql is not None:
params['jql'] = jql
if expand is not None:
params['expand'] = expand
return self.get('rest/api/2/search', params=params)
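# Hedged usage sketch (not in the original source). Assuming a client constructed as
# jira = Jira(url='https://jira.example.com', username='admin', password='secret'),
# a paged JQL search could look like (project key DEMO and credentials are placeholders):
#   result = jira.jql('project = DEMO AND status = "To Do"', fields=['summary', 'status'], limit=25)
#   for found_issue in result.get('issues', []):
#       print(found_issue['key'], found_issue['fields']['summary'])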
def csv(self, jql, limit=1000):
"""
Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file
"""
params = {'tempMax': limit,
'jqlQuery': jql}
url = 'sr/jira.issueviews:searchrequest-csv-all-fields/temp/SearchRequest.csv'
return self.get(url, params=params, not_json_response=True, headers={'Accept': 'application/csv'})
def user(self, username, expand=None):
"""
Returns a user. This resource cannot be accessed anonymously.
:param username:
:param expand: Can be 'groups,applicationRoles'
:return:
"""
params = {'username': username}
if expand:
params['expand'] = expand
return self.get('rest/api/2/user', params=params)
def is_active_user(self, username):
"""
Check status of user
:param username:
:return:
"""
return self.user(username).get('active')
def user_remove(self, username):
"""
Remove user from Jira if this user does not have any activity
:param username:
:return:
"""
return self.delete('rest/api/2/user?username={0}'.format(username))
def user_update(self, username, data):
"""
Update user attributes based on json
:param username:
:param data:
:return:
"""
url = 'rest/api/2/user?username={0}'.format(username)
return self.put(url, data=data)
def user_update_username(self, old_username, new_username):
"""
Update username
:param old_username:
:param new_username:
:return:
"""
data = {"name": new_username}
return self.user_update(old_username, data=data)
def user_update_email(self, username, email):
"""
Update user email for new domain changes
:param username:
:param email:
:return:
"""
data = {'name': username, 'emailAddress': email}
return self.user_update(username, data=data)
def user_create(self, username, email, display_name, password=None, notification=None):
"""
Create a user in Jira
:param username:
:param email:
:param display_name:
:param password: OPTIONAL: If a password is not set, a random password is generated.
:param notification: OPTIONAL: Sends the user an email confirmation that they have been added to Jira.
Default:false.
:return:
"""
log.warning('Creating user {}'.format(display_name))
data = {'name': username,
'emailAddress': email,
'displayName': display_name}
if password is not None:
data['password'] = password
else:
data['notification'] = True
if notification is not None:
# Only an explicit False disables the confirmation email (behaviour preserved from the original checks).
data['notification'] = notification is not False
return self.post('rest/api/2/user', data=data)
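# Hedged usage sketch (not in the original source); all values below are placeholders:
#   new_user = jira.user_create(
#       username='jdoe',
#       email='jdoe@example.com',
#       display_name='John Doe',
#       password=None,        # let Jira generate a random password
#       notification=True)    # send the confirmation email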
def user_properties(self, username):
"""
Get user property
:param username:
:return:
"""
return self.get('rest/api/2/user/properties?username={}'.format(username))
def user_property(self, username, key_property):
"""
Get user property
:param key_property:
:param username:
:return:
"""
params = {'username': username}
return self.get('rest/api/2/user/properties/{}'.format(key_property), params=params)
def user_set_property(self, username, key_property, value_property):
"""
Set property for user
:param username:
:param key_property:
:param value_property:
:return:
"""
url = 'rest/api/2/user/properties/{key_property}?username={user_name}'.format(key_property=key_property,
user_name=username)
data = {'value': value_property}
return self.put(url, data=data)
def user_delete_property(self, username, key_property):
"""
Delete property for user
:param username:
:param key_property:
:return:
"""
url = 'rest/api/2/user/properties/{}'.format(key_property)
params = {'username': username}
return self.delete(url, params=params)
def user_update_or_create_property_through_rest_point(self, username, key, value):
"""
ATTENTION!
This method can only be used after a matching custom REST endpoint (e.g. via ScriptRunner) has been configured on the Jira side
:param username:
:param key:
:param value:
:return:
"""
url = 'rest/scriptrunner/latest/custom/updateUserProperty'
params = {'username': username, 'property': key, 'value': value}
return self.get(url, params=params)
def user_deactivate(self, username):
"""
Disable user. Works from 8.3.0 Release
https://docs.atlassian.com/software/jira/docs/api/REST/8.3.0/#api/2/user-updateUser
:param username:
:return:
"""
data = {"active": "false", "name": username}
return self.user_update(username=username, data=data)
def user_disable(self, username):
"""Override the disable method"""
return self.user_deactivate(username)
def user_disable_throw_rest_endpoint(self, username, url='rest/scriptrunner/latest/custom/disableUser',
param='userName'):
"""The disable method throw own rest enpoint"""
url = "{}?{}={}".format(url, param, username)
return self.get(path=url)
def user_get_websudo(self):
""" Get web sudo cookies using normal http request"""
url = 'secure/admin/WebSudoAuthenticate.jspa'
headers = self.form_token_headers
data = {
'webSudoPassword': self.password,
'webSudoIsPost': 'false',
}
answer = self.get(url, headers=headers)
atl_token = None
if answer:
atl_token = \
answer.split('<meta id="atlassian-token" name="atlassian-token" content="')[1].split('\n')[0].split(
'"')[0]
if atl_token:
data['atl_token'] = atl_token
return self.post(path=url, data=data, headers=headers)
def user_find_by_user_string(self, username, start=0, limit=50, include_inactive_users=False,
include_active_users=True):
"""
Fuzzy search using username and display name
:param username: Use '.' to find all users
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of users to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:param include_inactive_users: OPTIONAL: Return users with "active: False"
:param include_active_users: OPTIONAL: Return users with "active: True".
:return:
"""
url = 'rest/api/2/user/search'
params = {'username': username,
'includeActive': include_active_users,
'includeInactive': include_inactive_users,
'startAt': start,
'maxResults': limit
}
return self.get(url, params=params)
def is_user_in_application(self, username, application_key):
"""
Utility function to test whether a user has an application role
:param username: The username of the user to test.
:param application_key: The application key of the application
:return: True if the user has the application, else False
"""
user = self.user(username, 'applicationRoles') # Get applications roles of the user
if 'self' in user:
for application_role in user.get('applicationRoles').get('items'):
if application_role.get('key') == application_key:
return True
return False
def add_user_to_application(self, username, application_key):
"""
Add a user to an application
:param username: The username of the user to add.
:param application_key: The application key of the application
:return: True if the user was added to the application, else False
:see: https://docs.atlassian.com/software/jira/docs/api/REST/7.5.3/#api/2/user-addUserToApplication
"""
params = {
'username': username,
'applicationKey': application_key
}
return self.post('rest/api/2/user/application', params=params) is None
# Application roles
def get_all_application_roles(self):
"""
Returns all ApplicationRoles in the system
:return:
"""
url = 'rest/api/2/applicationrole'
return self.get(url) or {}
def get_application_role(self, role_key):
"""
Returns the ApplicationRole with passed key if it exists
:param role_key: str
:return:
"""
url = 'rest/api/2/applicationrole/{}'.format(role_key)
return self.get(url) or {}
def projects(self, included_archived=None):
"""Returns all projects which are visible for the currently logged in user.
If no user is logged in, it returns the list of projects that are visible when using anonymous access.
:param included_archived: boolean whether to include archived projects in response, default: false
:return:
"""
params = {}
if included_archived:
params['includeArchived'] = included_archived
return self.get('rest/api/2/project', params=params)
def get_all_projects(self, included_archived=None):
return self.projects(included_archived)
def project(self, key):
return self.get('rest/api/2/project/{0}'.format(key))
def delete_project(self, key):
"""
DELETE /rest/api/2/project/<project_key>
:param key: str
:return:
"""
return self.delete('rest/api/2/project/{0}'.format(key))
def get_project_components(self, key):
"""
Get project components using project key
:param key: str
:return:
"""
return self.get('rest/api/2/project/{0}/components'.format(key))
def get_project_versions(self, key, expand=None):
"""
Contains a full representation of the specified project's versions.
:param key:
:param expand: the parameters to expand
:return:
"""
params = {}
if expand is not None:
params['expand'] = expand
return self.get('rest/api/2/project/{}/versions'.format(key), params=params)
def get_project_versions_paginated(self, key, start=None, limit=None, order_by=None, expand=None):
"""
Returns all versions for the specified project. Results are paginated.
Results can be ordered by the following fields:
sequence
name
startDate
releaseDate
:param key: the project key or id
:param start: the page offset, if not specified then defaults to 0
:param limit: how many results on the page should be included. Defaults to 50.
:param order_by: ordering of the results.
:param expand: the parameters to expand
:return:
"""
params = {}
if start is not None:
params['startAt'] = int(start)
if limit is not None:
params['maxResults'] = int(limit)
if order_by is not None:
params['orderBy'] = order_by
if expand is not None:
params['expand'] = expand
return self.get('rest/api/2/project/{}/version'.format(key), params=params)
def add_version(self, project_key, project_id, version, is_archived=False, is_released=False):
"""
Add missing version to project
:param project_key: the project key
:param project_id: the project id
:param version: the new project version to add
:param is_archived:
:param is_released:
:return:
"""
payload = {'name': version, 'archived': is_archived, 'released': is_released, 'project': project_key,
'projectId': project_id}
return self.post("rest/api/2/version", data=payload)
def get_project_roles(self, project_key):
"""
Provide associated project roles
:param project_key:
:return:
"""
return self.get('rest/api/2/project/{0}/role'.format(project_key))
def get_project_actors_for_role_project(self, project_key, role_id):
"""
Returns the details for a given project role in a project.
:param project_key:
:param role_id:
:return:
"""
url = 'rest/api/2/project/{projectIdOrKey}/role/{id}'.format(projectIdOrKey=project_key,
id=role_id)
return (self.get(url) or {}).get('actors')
def delete_project_actors(self, project_key, role_id, actor, actor_type=None):
"""
Deletes actors (users or groups) from a project role.
Delete a user from the role: /rest/api/2/project/{projectIdOrKey}/role/{roleId}?user={username}
Delete a group from the role: /rest/api/2/project/{projectIdOrKey}/role/{roleId}?group={groupname}
:param project_key:
:param role_id:
:param actor:
:param actor_type: str : group or user string
:return:
"""
url = 'rest/api/2/project/{projectIdOrKey}/role/{roleId}'.format(projectIdOrKey=project_key,
roleId=role_id)
params = {}
if actor_type is not None and actor_type in ['group', 'user']:
params[actor_type] = actor
return self.delete(url, params=params)
def add_project_actor_in_role(self, project_key, role_id, actor, actor_type):
"""
:param project_key:
:param role_id:
:param actor:
:param actor_type:
:return:
"""
url = 'rest/api/2/project/{projectIdOrKey}/role/{roleId}'.format(projectIdOrKey=project_key,
roleId=role_id)
data = {}
if actor_type in ['group', 'atlassian-group-role-actor']:
data['group'] = [actor]
elif actor_type in ['user', 'atlassian-user-role-actor']:
data['user'] = [actor]
return self.post(url, data=data)
def update_project(self, project_key, data, expand=None):
"""
Updates a project.
Update project: /rest/api/2/project/{projectIdOrKey}
:param project_key: project key of project that needs to be updated
:param data: dictionary containing the data to be updated
:param expand: the parameters to expand
"""
if expand:
url = 'rest/api/2/project/{projectIdOrKey}?expand={expand}'.format(projectIdOrKey=project_key,
expand=expand)
else:
url = 'rest/api/2/project/{projectIdOrKey}'.format(projectIdOrKey=project_key)
return self.put(url, data)
def get_project_permission_scheme(self, project_id_or_key, expand=None):
"""
Gets a permission scheme assigned with a project
Use 'expand' to get details
:param project_id_or_key: str
:param expand: str
:return: data of project permission scheme
"""
if expand is None:
url = 'rest/api/2/project/{}/permissionscheme'.format(project_id_or_key)
else:
url = 'rest/api/2/project/{0}/permissionscheme?expand={1}'.format(project_id_or_key, expand)
return self.get(url)
def create_issue_type(self, name, description='', type='standard'):
"""
Create a new issue type
:param name:
:param description:
:param type: standard or sub-task
:return:
"""
data = {
'name': name,
'description': description,
'type': type
}
return self.post('rest/api/2/issuetype', data=data)
def issue(self, key, fields='*all'):
return self.get('rest/api/2/issue/{0}?fields={1}'.format(key, fields))
def get_issue(self, issue_id_or_key, fields=None, properties=None, update_history=True):
"""
Returns a full representation of the issue for the given issue key
By default, all fields are returned in this get-issue resource
:param issue_id_or_key: str
:param fields: str
:param properties: str
:param update_history: bool
:return: issue
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
if fields is not None:
params['fields'] = fields
if properties is not None:
params['properties'] = properties
if update_history is True:
params['updateHistory'] = 'true'
if update_history is False:
params['updateHistory'] = 'false'
return self.get(url, params=params)
def bulk_issue(self, issue_list, fields='*all'):
"""
:param fields:
:param list issue_list:
:return:
"""
jira_issue_regex = re.compile(r'[A-Z]{1,10}-\d+')
missing_issues = list()
matched_issue_keys = list()
for key in issue_list:
if re.match(jira_issue_regex, key):
matched_issue_keys.append(key)
jql = 'key in ({})'.format(', '.join(matched_issue_keys))
query_result = self.jql(jql, fields=fields)
if 'errorMessages' in query_result.keys():
for message in query_result['errorMessages']:
for key in issue_list:
if key in message:
missing_issues.append(key)
issue_list.remove(key)
query_result, missing_issues = self.bulk_issue(issue_list, fields)
return query_result, missing_issues
def get_issue_changelog(self, issue_key):
"""
Get issue related change log
:param issue_key:
:return:
"""
url = 'rest/api/2/issue/{}?expand=changelog'.format(issue_key)
return (self.get(url) or {}).get('changelog')
def issue_add_json_worklog(self, key, worklog):
"""
:param key:
:param worklog:
:return:
"""
url = 'rest/api/2/issue/{}/worklog'.format(key)
return self.post(url, data=worklog)
def issue_worklog(self, key, started, time_sec, comment=None):
"""
:param key:
:param time_sec: int: second
:param started:
:param comment:
:return:
"""
data = {
"started": started,
"timeSpentSeconds": time_sec
}
if comment:
data['comment'] = comment
return self.issue_add_json_worklog(key=key, worklog=data)
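# Hedged usage sketch (not in the original source). Jira expects 'started' in its
# REST date-time format, assumed here to be e.g. '2020-01-20T10:00:00.000+0000':
#   jira.issue_worklog('DEMO-1', started='2020-01-20T10:00:00.000+0000',
#                      time_sec=3600, comment='Investigated the reported bug')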
def issue_get_worklog(self, issue_id_or_key):
"""
Returns all work logs for an issue.
Note: Work logs won't be returned if the Log work field is hidden for the project.
:param issue_id_or_key:
:return:
"""
url = "rest/api/2/issue/{issueIdOrKey}/worklog".format(issueIdOrKey=issue_id_or_key)
return self.get(url)
def issue_field_value(self, key, field):
issue = self.get('rest/api/2/issue/{0}?fields={1}'.format(key, field))
return issue['fields'][field]
def issue_fields(self, key):
issue = self.get('rest/api/2/issue/{0}'.format(key))
return issue['fields']
def update_issue_field(self, key, fields='*all'):
return self.put('rest/api/2/issue/{0}'.format(key), data={'fields': fields})
def get_custom_fields(self, search=None, start=1, limit=50):
"""
Get custom fields. Evaluated on 7.12
:param search: str
:param start: long Default: 1
:param limit: int Default: 50
:return:
"""
url = 'rest/api/2/customFields'
params = {}
if search:
params['search'] = search
if start:
params['startAt'] = start
if limit:
params['maxResults'] = limit
return self.get(url, params=params)
def create_custom_field(self, name, type, search_key=None, description=None):
"""
Creates a custom field with the given name and type
:param name: str
:param type: str, like 'com.atlassian.jira.plugin.system.customfieldtypes:textfield'
:param search_key: str, like above
:param description: str
"""
url = 'rest/api/2/field'
data = {'name': name, 'type': type}
if search_key:
data['search_key'] = search_key
if description:
data['description'] = description
return self.post(url, data=data)
def get_all_available_screen_fields(self, screen_id):
"""
Get all available fields by screen id
:param screen_id:
:return:
"""
url = 'rest/api/2/screens/{}/availableFields'.format(screen_id)
return self.get(url)
def get_screen_tabs(self, screen_id):
"""
Get tabs for the screen id
:param screen_id:
:return:
"""
url = 'rest/api/2/screens/{}/tabs'.format(screen_id)
return self.get(url)
def get_screen_tab_fields(self, screen_id, tab_id):
"""
Get fields by the tab id and the screen id
:param tab_id:
:param screen_id:
:return:
"""
url = 'rest/api/2/screens/{}/tabs/{}/fields'.format(screen_id, tab_id)
return self.get(url)
def get_all_screen_fields(self, screen_id):
"""
Get all fields by screen id
:param screen_id:
:return:
"""
screen_tabs = self.get_screen_tabs(screen_id)
fields = []
for screen_tab in screen_tabs:
tab_id = screen_tab['id']
if tab_id:
tab_fields = self.get_screen_tab_fields(screen_id=screen_id, tab_id=tab_id)
fields = fields + tab_fields
return fields
def get_issue_labels(self, issue_key):
"""
Get issue labels.
:param issue_key:
:return:
"""
url = 'rest/api/2/issue/{issue_key}?fields=labels'.format(issue_key=issue_key)
return (self.get(url) or {}).get('fields').get('labels')
def get_all_fields(self):
"""
Returns a list of all fields, both System and Custom
:return: application/jsonContains a full representation of all visible fields in JSON.
"""
url = 'rest/api/2/field'
return self.get(url)
def get_all_custom_fields(self):
"""
Returns a list of all custom fields
This method just filters the result of get_all_fields() down to custom fields
:return: application/jsonContains a full representation of all visible fields in JSON.
"""
fields = self.get_all_fields()
custom_fields = []
for field in fields:
if field['custom']:
custom_fields.append(field)
return custom_fields
def project_leaders(self):
for project in self.projects():
key = project['key']
project_data = self.project(key)
lead = self.user(project_data['lead']['name'])
yield {
'project_key': key,
'project_name': project['name'],
'lead_name': lead['displayName'],
'lead_key': lead['name'],
'lead_email': lead['emailAddress']}
def get_project_issuekey_last(self, project):
jql = 'project = {project} ORDER BY issuekey DESC'.format(project=project)
return (self.jql(jql).get('issues') or {})[0]['key']
def get_project_issuekey_all(self, project):
jql = 'project = {project} ORDER BY issuekey ASC'.format(project=project)
return [issue['key'] for issue in self.jql(jql)['issues']]
def get_project_issues_count(self, project):
jql = 'project = "{project}" '.format(project=project)
return self.jql(jql, fields='*none')['total']
def get_all_project_issues(self, project, fields='*all'):
jql = 'project = {project} ORDER BY key'.format(project=project)
return self.jql(jql, fields=fields)['issues']
def get_all_assignable_users_for_project(self, project_key, start=0, limit=50):
"""
Provide assignable users for project
:param project_key:
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of users to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:return:
"""
url = 'rest/api/2/user/assignable/search?project={project_key}&startAt={start}&maxResults={limit}'.format(
project_key=project_key,
start=start,
limit=limit)
return self.get(url)
def get_assignable_users_for_issue(self, issue_key, username=None, start=0, limit=50):
"""
Provide assignable users for issue
:param issue_key:
:param username: OPTIONAL: Can be used to check whether the user can be assigned
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of users to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:return:
"""
url = 'rest/api/2/user/assignable/search?issueKey={issue_key}&startAt={start}&maxResults={limit}'.format(
issue_key=issue_key,
start=start,
limit=limit)
if username:
url += '&username={username}'.format(username=username)
return self.get(url)
def get_groups(self, query=None, exclude=None, limit=20):
"""
REST endpoint for searching groups in a group picker
Returns groups with substrings matching a given query. This is mainly for use with the group picker,
so the returned groups contain html to be used as picker suggestions. The groups are also wrapped
in a single response object that also contains a header for use in the picker,
specifically Showing X of Y matching groups.
The number of groups returned is limited by the system property "jira.ajax.autocomplete.limit"
The groups will be unique and sorted.
:param query: str
:param exclude: str
:param limit: int
:return: Returned even if no groups match the given substring
"""
url = 'rest/api/2/groups/picker'
params = {}
if query:
params['query'] = query
else:
params['query'] = ''
if exclude:
params['exclude'] = exclude
if limit:
params['maxResults'] = limit
return self.get(url, params=params)
def create_group(self, name):
"""
Create a group by given group parameter
:param name: str
:return: New group params
"""
url = 'rest/api/2/group'
data = {'name': name}
return self.post(url, data=data)
def remove_group(self, name, swap_group=None):
"""
Delete a group by given group parameter
If you delete a group and content is restricted to that group, the content will be hidden from all users
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
"""
log.warning('Removing group...')
url = 'rest/api/2/group'
if swap_group is not None:
params = {'groupname': name, 'swapGroup': swap_group}
else:
params = {'groupname': name}
return self.delete(url, params=params)
def get_all_users_from_group(self, group, include_inactive_users=False, start=0, limit=50):
"""
Thin wrapper around the group member endpoint (rest/api/2/group/member)
:param group:
:param include_inactive_users:
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of users to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:return:
"""
url = 'rest/api/2/group/member'
params = {}
if group:
params['groupname'] = group
params['includeInactiveUsers'] = include_inactive_users
params['startAt'] = start
params['maxResults'] = limit
return self.get(url, params=params)
def add_user_to_group(self, username, group_name):
"""
Add given user to a group
:param username: str
:param group_name: str
:return: Current state of the group
"""
url = 'rest/api/2/group/user'
params = {'groupname': group_name}
data = {'name': username}
return self.post(url, params=params, data=data)
def remove_user_from_group(self, username, group_name):
"""
Remove given user from a group
:param username: str
:param group_name: str
:return:
"""
log.warning('Removing user from a group...')
url = 'rest/api/2/group/user'
params = {'groupname': group_name, 'username': username}
return self.delete(url, params=params)
def issue_exists(self, issue_key):
original_value = self.advanced_mode
self.advanced_mode = True
try:
resp = self.issue(issue_key, fields="*none")
if resp.status_code == 404:
log.info(
'Issue "{issue_key}" does not exists'.format(issue_key=issue_key)
)
return False
resp.raise_for_status()
log.info('Issue "{issue_key}" exists'.format(issue_key=issue_key))
return True
finally:
self.advanced_mode = original_value
def issue_deleted(self, issue_key):
exists = self.issue_exists(issue_key)
if exists:
log.info('Issue "{issue_key}" is not deleted'.format(issue_key=issue_key))
else:
log.info('Issue "{issue_key}" is deleted'.format(issue_key=issue_key))
return not exists
def delete_issue(self, issue_id_or_key, delete_subtasks=True):
"""
Delete an issue
If the issue has subtasks you must set the parameter delete_subtasks = True to delete the issue
You cannot delete an issue without its subtasks also being deleted
:param issue_id_or_key:
:param delete_subtasks:
:return:
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
if delete_subtasks is True:
params['deleteSubtasks'] = 'true'
else:
params['deleteSubtasks'] = 'false'
log.warning('Removing issue {}...'.format(issue_id_or_key))
return self.delete(url, params=params)
# @todo merge with edit_issue method
def issue_update(self, issue_key, fields):
log.warning('Updating issue "{issue_key}" with "{fields}"'.format(issue_key=issue_key, fields=fields))
url = 'rest/api/2/issue/{0}'.format(issue_key)
return self.put(url, data={'fields': fields})
def edit_issue(self, issue_id_or_key, fields, notify_users=True):
"""
Edits an issue from a JSON representation
The issue can either be updated by setting explicit the field
value(s) or by using an operation to change the field value
:param issue_id_or_key: str
:param fields: JSON
:param notify_users: bool
:return:
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
data = {'update': fields}
if notify_users is True:
params['notifyUsers'] = 'true'
else:
params['notifyUsers'] = 'false'
return self.put(url, data=data, params=params)
def issue_add_watcher(self, issue_key, user):
"""
Start watching issue
:param issue_key:
:param user:
:return:
"""
log.warning('Adding user {user} to "{issue_key}" watchers'.format(issue_key=issue_key, user=user))
data = user
return self.post('rest/api/2/issue/{issue_key}/watchers'.format(issue_key=issue_key), data=data)
def assign_issue(self, issue, assignee=None):
"""Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue ID or key to assign
:type issue: int or str
:param assignee: the user to assign the issue to
:type assignee: str
:rtype: bool
"""
url = 'rest/api/2/issue/{issue}/assignee'.format(issue=issue)
data = {'name': assignee}
return self.put(url, data=data)
def create_issue(self, fields, update_history=False):
"""
Creates an issue or a sub-task from a JSON representation
:param fields: JSON data
:param update_history: bool (if true then the user's project history is updated)
:return:
"""
url = 'rest/api/2/issue'
data = {'fields': fields}
params = {}
if update_history is True:
params['updateHistory'] = 'true'
else:
params['updateHistory'] = 'false'
return self.post(url, params=params, data=data)
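# Hedged usage sketch (not in the original source); the project key and issue type are
# placeholders, and the field names follow the standard Jira create-issue payload:
#   fields = {
#       'project': {'key': 'DEMO'},
#       'summary': 'Created from the REST API',
#       'description': 'Optional longer description',
#       'issuetype': {'name': 'Task'},
#   }
#   new_issue = jira.create_issue(fields)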
def create_issues(self, list_of_issues_data):
"""
Creates issues or sub-tasks from a JSON representation
Creates many issues in one bulk operation
:param list_of_issues_data: list of JSON data
:return:
"""
url = 'rest/api/2/issue/bulk'
data = {'issueUpdates': list_of_issues_data}
return self.post(url, data=data)
# @todo refactor and merge with create_issue method
def issue_create(self, fields):
log.warning('Creating issue "{summary}"'.format(summary=fields['summary']))
url = 'rest/api/2/issue'
return self.post(url, data={'fields': fields})
def issue_create_or_update(self, fields):
issue_key = fields.get('issuekey', None)
if not issue_key or not self.issue_exists(issue_key):
log.info('Issue key is not provided or does not exist in the destination. Will attempt to create an issue')
del fields['issuekey']
return self.issue_create(fields)
if self.issue_deleted(issue_key):
log.warning('Issue "{issue_key}" deleted, skipping'.format(issue_key=issue_key))
return None
log.info('Issue "{issue_key}" exists, will update'.format(issue_key=issue_key))
del fields['issuekey']
return self.issue_update(issue_key, fields)
def issue_add_comment(self, issue_key, comment, visibility=None):
"""
Add comment into Jira issue
:param issue_key:
:param comment:
:param visibility: OPTIONAL
:return:
"""
url = 'rest/api/2/issue/{issueIdOrKey}/comment'.format(issueIdOrKey=issue_key)
data = {'body': comment}
if visibility:
data['visibility'] = visibility
return self.post(url, data=data)
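# Hedged usage sketch (not in the original source); the group name is a placeholder and
# the visibility payload mirrors the standard Jira comment resource:
#   jira.issue_add_comment('DEMO-1', 'Deployed to staging',
#                          visibility={'type': 'group', 'value': 'jira-developers'})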
# Attachments
def get_attachment(self, attachment_id):
"""
Returns the meta-data for an attachment, including the URI of the actual attached file
:param attachment_id: int
:return:
"""
url = 'rest/api/2/attachment/{}'.format(attachment_id)
return self.get(url)
def remove_attachment(self, attachment_id):
"""
Remove an attachment from an issue
:param attachment_id: int
:return: if success, return None
"""
url = 'rest/api/2/attachment/{}'.format(attachment_id)
return self.delete(url)
def get_attachment_meta(self):
"""
Returns the meta information for an attachments,
specifically if they are enabled and the maximum upload size allowed
:return:
"""
url = 'rest/api/2/attachment/meta'
return self.get(url)
def add_attachment(self, issue_key, filename):
"""
Add attachment to Issue
:param issue_key: str
:param filename: str, name, if file in current directory or full path to file
"""
log.warning('Adding attachment...')
headers = {'X-Atlassian-Token': 'no-check'}
url = 'rest/api/2/issue/{}/attachments'.format(issue_key)
with open(filename, 'rb') as attachment:
files = {'file': attachment}
return self.post(url, headers=headers, files=files)
def get_issue_remotelinks(self, issue_key, global_id=None, internal_id=None):
"""
Compatibility naming method with get_issue_remote_links()
"""
return self.get_issue_remote_links(issue_key, global_id, internal_id)
def get_issue_remote_links(self, issue_key, global_id=None, internal_id=None):
"""
Finding all Remote Links on an issue, also with filtering by Global ID and internal ID
:param issue_key:
:param global_id: str
:param internal_id: str
:return:
"""
url = 'rest/api/2/issue/{issue_key}/remotelink'.format(issue_key=issue_key)
params = {}
if global_id:
params['globalId'] = global_id
if internal_id:
url += '/' + internal_id
return self.get(url, params=params)
def create_or_update_issue_remote_links(self, issue_key, link_url, title, global_id=None, relationship=None):
"""
Add Remote Link to Issue, update url if global_id is passed
:param issue_key: str
:param link_url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, OPTIONAL: Default by built-in method: 'Web Link'
"""
url = 'rest/api/2/issue/{issue_key}/remotelink'.format(issue_key=issue_key)
data = {'object': {'url': link_url, 'title': title}}
if global_id:
data['globalId'] = global_id
if relationship:
data['relationship'] = relationship
return self.post(url, data=data)
def get_issue_remote_link_by_id(self, issue_key, link_id):
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.get(url)
def update_issue_remote_link_by_id(self, issue_key, link_id, url, title, global_id=None, relationship=None):
"""
Update existing Remote Link on Issue
:param issue_key: str
:param link_id: str
:param url: str
:param title: str
:param global_id: str, OPTIONAL:
:param relationship: str, Optional. Default by built-in method: 'Web Link'
"""
data = {'object': {'url': url, 'title': title}}
if global_id:
data['globalId'] = global_id
if relationship:
data['relationship'] = relationship
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.put(url, data=data)
def delete_issue_remote_link_by_id(self, issue_key, link_id):
"""
Deletes Remote Link on Issue
:param issue_key: str
:param link_id: str
"""
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.delete(url)
def get_issue_transitions(self, issue_key):
return [{'name': transition['name'], 'id': int(transition['id']), 'to': transition['to']['name']}
for transition in (self.get_issue_transitions_full(issue_key) or {}).get('transitions')]
def get_issue_transitions_full(self, issue_key, transition_id=None, expand=None):
"""
Get a list of the transitions possible for this issue by the current user,
along with fields that are required and their types.
Fields will only be returned if expand = 'transitions.fields'.
The fields in the metadata correspond to the fields in the transition screen for that transition.
Fields not in the screen will not be in the metadata.
:param issue_key: str
:param transition_id: str
:param expand: str
:return:
"""
url = 'rest/api/2/issue/{issue_key}/transitions'.format(issue_key=issue_key)
params = {}
if transition_id:
params['transitionId'] = transition_id
if expand:
params['expand'] = expand
return self.get(url, params=params)
def get_status_id_from_name(self, status_name):
url = 'rest/api/2/status/{name}'.format(name=status_name)
return int((self.get(url) or {}).get('id'))
def get_status_for_project(self, project_key):
url = 'rest/api/2/project/{name}/statuses'.format(name=project_key)
return self.get(url)
def get_transition_id_to_status_name(self, issue_key, status_name):
for transition in self.get_issue_transitions(issue_key):
if status_name.lower() == transition['to'].lower():
return int(transition['id'])
def issue_transition(self, issue_key, status):
return self.set_issue_status(issue_key, status)
def set_issue_status(self, issue_key, status_name):
url = 'rest/api/2/issue/{issue_key}/transitions'.format(issue_key=issue_key)
transition_id = self.get_transition_id_to_status_name(issue_key, status_name)
return self.post(url, data={'transition': {'id': transition_id}})
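# Hedged usage sketch (not in the original source); the target status must be reachable
# through one of the issue's available transitions, otherwise transition_id is None:
#   jira.set_issue_status('DEMO-1', 'In Progress')
#   # or, when the transition id is already known (21 is a placeholder):
#   jira.set_issue_status_by_transition_id('DEMO-1', 21)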
def set_issue_status_by_transition_id(self, issue_key, transition_id):
"""
Setting status by transition_id
:param issue_key: str
:param transition_id: int
"""
url = 'rest/api/2/issue/{issue_key}/transitions'.format(issue_key=issue_key)
return self.post(url, data={'transition': {'id': transition_id}})
def get_issue_status(self, issue_key):
url = 'rest/api/2/issue/{issue_key}?fields=status'.format(issue_key=issue_key)
return (((self.get(url) or {}).get('fields') or {}).get('status') or {}).get('name') or {}
def get_issue_status_id(self, issue_key):
url = 'rest/api/2/issue/{issue_key}?fields=status'.format(issue_key=issue_key)
return (self.get(url) or {}).get('fields').get('status').get('id')
def get_issue_link_types(self):
"""Returns a list of available issue link types,
if issue linking is enabled.
Each issue link type has an id,
a name and a label for the outward and inward link relationship.
"""
url = 'rest/api/2/issueLinkType'
return (self.get(url) or {}).get('issueLinkTypes')
def get_issue_link_types_names(self):
"""
Provide issue link type names
:return:
"""
return [link_type['name'] for link_type in self.get_issue_link_types()]
def create_issue_link_type_by_json(self, data):
"""Create a new issue link type.
:param data:
{
"name": "Duplicate",
"inward": "Duplicated by",
"outward": "Duplicates"
}
:return:
"""
url = 'rest/api/2/issueLinkType'
return self.post(url, data=data)
def create_issue_link_type(self, link_type_name, inward, outward):
"""Create a new issue link type.
:param outward:
:param inward:
:param link_type_name:
:return:
"""
if link_type_name.lower() in [x.lower() for x in self.get_issue_link_types_names()]:
log.error("Link type name already exists")
return "Link type name already exists"
data = {
'name': link_type_name,
'inward': inward,
'outward': outward
}
return self.create_issue_link_type_by_json(data=data)
def get_issue_link_type(self, issue_link_type_id):
"""Returns for a given issue link type id all information about this issue link type.
"""
url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id)
return self.get(url)
def delete_issue_link_type(self, issue_link_type_id):
"""Delete the specified issue link type."""
url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id)
return self.delete(url)
def update_issue_link_type(self, issue_link_type_id, data):
"""
Update the specified issue link type.
:param issue_link_type_id:
:param data: {
"name": "Duplicate",
"inward": "Duplicated by",
"outward": "Duplicates"
}
:return:
"""
url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id)
return self.put(url, data=data)
def create_issue_link(self, data):
"""
Creates an issue link between two issues.
The user requires the link issue permission for the issue which will be linked to another issue.
The specified link type in the request is used to create the link and will create a link from
the first issue to the second issue using the outward description. It also create a link from
the second issue to the first issue using the inward description of the issue link type.
It will add the supplied comment to the first issue. The comment can carry a visibility restriction:
if a group is specified, only users of that group can view the comment; if roleLevel is specified,
only users who have the specified role can view it.
The user who creates the issue link needs to belong to the specified group or have the specified role.
:param data: i.e.
{
"type": {"name": "Duplicate" },
"inwardIssue": { "key": "HSP-1"},
"outwardIssue": {"key": "MKY-1"},
"comment": { "body": "Linked related issue!",
"visibility": { "type": "group", "value": "jira-software-users" }
}
}
:return:
"""
log.info(
'Linking issue {inward} and {outward}'.format(inward=data['inwardIssue'], outward=data['outwardIssue']))
url = 'rest/api/2/issueLink'
return self.post(url, data=data)
def remove_issue_link(self, link_id):
"""
Deletes an issue link with the specified id.
To be able to delete an issue link you must be able to view both issues
and must have the link issue permission for at least one of the issues.
:param link_id: the issue link id.
:return:
"""
url = 'rest/api/2/issueLink/{}'.format(link_id)
return self.delete(url)
def get_issue_link(self, link_id):
"""
Returns an issue link with the specified id.
:param link_id: the issue link id.
:return:
"""
url = 'rest/api/2/issueLink/{}'.format(link_id)
return self.get(url)
def create_filter(self, name, jql, description=None, favourite=False):
"""
:param name: str
:param jql: str
:param description: str, Optional. Empty string by default
:param favourite: bool, Optional. False by default
"""
data = {'jql': jql, 'name': name, 'description': description if description else '',
'favourite': 'true' if favourite else 'false'}
url = 'rest/api/2/filter'
return self.post(url, data=data)
def component(self, component_id):
return self.get('rest/api/2/component/{component_id}'.format(component_id=component_id))
def get_component_related_issues(self, component_id):
"""
Returns counts of issues related to this component.
:param component_id:
:return:
"""
url = 'rest/api/2/component/{component_id}/relatedIssueCounts'.format(component_id=component_id)
return self.get(url)
def create_component(self, component):
log.warning('Creating component "{name}"'.format(name=component['name']))
url = 'rest/api/2/component/'
return self.post(url, data=component)
def delete_component(self, component_id):
log.warning('Deleting component "{component_id}"'.format(component_id=component_id))
return self.delete('rest/api/2/component/{component_id}'.format(component_id=component_id))
def update_component_lead(self, component_id, lead):
data = {'id': component_id, 'leadUserName': lead}
return self.put('rest/api/2/component/{component_id}'.format(component_id=component_id), data=data)
def get_resolution_by_id(self, resolution_id):
"""
Get Resolution info by id
:param resolution_id:
:return:
"""
url = 'rest/api/2/resolution/{}'.format(resolution_id)
return self.get(url)
def get_priority_by_id(self, priority_id):
"""
Get Priority info by id
:param priority_id:
:return:
"""
url = 'rest/api/2/priority/{}'.format(priority_id)
return self.get(url)
def get_all_workflows(self):
"""
Provide all workflows for application admin
:return:
"""
url = 'rest/api/2/workflow'
return self.get(url)
def get_all_statuses(self):
"""
Returns a list of all statuses
:return:
"""
url = 'rest/api/2/status'
return self.get(url)
def get_all_resolutions(self):
"""
Returns a list of all resolutions.
:return:
"""
url = 'rest/api/2/resolution'
return self.get(url)
def get_all_priorities(self):
"""
Returns a list of all priorities.
:return:
"""
url = 'rest/api/2/priority'
return self.get(url)
def get_all_global_project_roles(self):
"""
Get all the ProjectRoles available in Jira. Currently this list is global.
:return:
"""
url = 'rest/api/2/role'
return self.get(url)
def upload_plugin(self, plugin_path):
"""
Upload a plugin into Jira from the given file path, e.g. useful for automated deployment
:param plugin_path:
:return:
"""
files = {
'plugin': open(plugin_path, 'rb')
}
headers = {
'X-Atlassian-Token': 'nocheck'
}
upm_token = self.request(method='GET', path='rest/plugins/1.0/', headers=headers, trailing=True).headers[
'upm-token']
url = 'rest/plugins/1.0/?token={upm_token}'.format(upm_token=upm_token)
return self.post(url, files=files, headers=headers)
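# Hedged usage sketch (not in the original source); the path is a placeholder and the
# caller needs Jira administrator rights to upload plugins:
#   jira.upload_plugin('/tmp/my-plugin-1.0.0.jar')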
def delete_plugin(self, plugin_key):
"""
Delete plugin
:param plugin_key:
:return:
"""
url = 'rest/plugins/1.0/{}-key'.format(plugin_key)
return self.delete(url)
def check_plugin_manager_status(self):
headers = {
'X-Atlassian-Token': 'nocheck',
'Content-Type': 'application/vnd.atl.plugins.safe.mode.flag+json'
}
url = 'rest/plugins/latest/safe-mode'
return self.request(method='GET', path=url, headers=headers)
# API/2 Get permissions
def get_permissions(self, project_id=None, project_key=None, issue_id=None, issue_key=None):
"""
Returns all permissions in the system and whether the currently logged in user has them.
You can optionally provide a specific context
to get permissions for (projectKey OR projectId OR issueKey OR issueId)
:param project_id: str
:param project_key: str
:param issue_id: str
:param issue_key: str
:return:
"""
url = 'rest/api/2/mypermissions'
params = {}
if project_id:
params['projectId'] = project_id
if project_key:
params['projectKey'] = project_key
if issue_id:
params['issueId'] = issue_id
if issue_key:
params['issueKey'] = issue_key
return self.get(url, params=params)
def get_all_permissions(self):
"""
Returns all permissions that are present in the Jira instance -
Global, Project and the global ones added by plugins
:return: All permissions
"""
url = 'rest/api/2/permissions'
return self.get(url)
def get_all_permissionschemes(self, expand=None):
"""
Returns a list of all permission schemes.
By default only shortened beans are returned.
If you want to include permissions of all the schemes,
then specify the permissions expand parameter.
Permissions will be included also if you specify any other expand parameter.
:param expand : permissions,user,group,projectRole,field,all
:return:
"""
url = 'rest/api/2/permissionscheme'
params = {}
if expand:
params['expand'] = expand
return (self.get(url, params=params) or {}).get('permissionSchemes')
def get_permissionscheme(self, permission_id, expand=None):
"""
Returns a list of all permission schemes.
By default only shortened beans are returned.
If you want to include permissions of all the schemes,
then specify the permissions expand parameter.
Permissions will be included also if you specify any other expand parameter.
:param permission_id
:param expand : permissions,user,group,projectRole,field,all
:return:
"""
url = 'rest/api/2/permissionscheme/{schemeID}'.format(schemeID=permission_id)
params = {}
if expand:
params['expand'] = expand
return self.get(url, params=params)
def set_permissionscheme_grant(self, permission_id, new_permission):
"""
Creates a permission grant in a permission scheme.
Example:
{
"holder": {
"type": "group",
"parameter": "jira-developers"
},
"permission": "ADMINISTER_PROJECTS"
}
:param permission_id
:param new_permission
:return:
"""
url = 'rest/api/2/permissionscheme/{schemeID}/permission'.format(schemeID=permission_id)
return self.post(url, data=new_permission)
def get_issue_security_schemes(self):
"""
Returns all issue security schemes that are defined
Administrator permission required
:return: list
"""
url = 'rest/api/2/issuesecurityschemes'
return self.get(url).get('issueSecuritySchemes')
def get_issue_security_scheme(self, scheme_id, only_levels=False):
"""
Returns the issue security scheme along with that are defined
Returned if the user has the administrator permission or if the scheme is used in a project in which the
user has the administrative permission
:param scheme_id: int
:param only_levels: bool
:return: list
"""
url = 'rest/api/2/issuesecurityschemes/{}'.format(scheme_id)
if only_levels is True:
return self.get(url).get('levels')
else:
return self.get(url)
def get_project_issue_security_scheme(self, project_id_or_key, only_levels=False):
"""
Returns the issue security scheme for project
Returned if the user has the administrator permission or if the scheme is used in a project in which the
user has the administrative permission
:param project_id_or_key: int
:param only_levels: bool
:return: list
"""
url = 'rest/api/2/project/{}/issuesecuritylevelscheme'.format(project_id_or_key)
if only_levels is True:
return self.get(url).get('levels')
else:
return self.get(url)
# Priority Schemes
def get_all_priority_schemes(self, start=0, limit=100, expand=None):
"""
Returns all priority schemes.
All project keys associated with the priority scheme will only be returned
if additional query parameter is provided expand=schemes.projectKeys.
:param start: the page offset, if not specified then defaults to 0
:param limit: how many results on the page should be included. Defaults to 100, maximum is 1000.
:param expand: can be 'schemes.projectKeys'
:return:
"""
url = 'rest/api/2/priorityschemes'
params = {}
if start:
params['startAt'] = int(start)
if limit:
params['maxResults'] = int(limit)
if expand:
params['expand'] = expand
return self.get(url, params=params)
def create_priority_scheme(self, data):
"""
Creates new priority scheme.
:param data:
{"name": "New priority scheme",
"description": "Priority scheme for very important projects",
"defaultOptionId": "3",
"optionIds": [
"1",
"2",
"3",
"4",
"5"
]}
:return: Returned if the priority scheme was created.
"""
return self.post(path="rest/api/2/priorityschemes", data=data)
# api/2/project/{projectKeyOrId}/priorityscheme
# Resource for associating priority schemes and projects.
def get_priority_scheme_of_project(self, project_key_or_id):
"""
Gets a full representation of a priority scheme in JSON format used by specified project.
User must be global administrator or project administrator.
:param project_key_or_id:
:return:
"""
url = 'rest/api/2/project/{}/priorityscheme'.format(project_key_or_id)
return self.get(url)
def assign_priority_scheme_for_project(self, project_key_or_id, priority_scheme_id):
"""
Assigns project with priority scheme. Priority scheme assign with migration is possible from the UI.
The operation will fail if a migration would be required as a result of the operation,
e.g. if there are issues whose priorities are invalid in the destination scheme.
All project keys associated with the priority scheme will only be returned
if additional query parameter is provided expand=projectKeys.
:param project_key_or_id:
:param priority_scheme_id:
:return:
"""
url = "rest/api/2/project/{projectKeyOrId}/priorityscheme".format(projectKeyOrId=project_key_or_id)
data = {"id": priority_scheme_id}
return self.put(url, data=data)
# Application properties
def get_property(self, key=None, permission_level=None, key_filter=None):
"""
Returns an application property
:param key: str
:param permission_level: str
:param key_filter: str
:return: list or item
"""
url = 'rest/api/2/application-properties'
params = {}
if key:
params['key'] = key
if permission_level:
params['permissionLevel'] = permission_level
if key_filter:
params['keyFilter'] = key_filter
return self.get(url, params=params)
def set_property(self, property_id, value):
url = 'rest/api/2/application-properties/{}'.format(property_id)
data = {'id': property_id, 'value': value}
return self.put(url, data=data)
def get_advanced_settings(self):
"""
Returns the properties that are displayed on the "General Configuration > Advanced Settings" page
:return:
"""
url = 'rest/api/2/application-properties/advanced-settings'
return self.get(url)
"""
#######################################################################
# Tempo Account REST API implements #
#######################################################################
"""
def tempo_account_get_accounts(self, skip_archived=None, expand=None):
"""
Get all Accounts that the logged in user has permission to browse.
:param skip_archived: bool OPTIONAL: skip archived Accounts, either true or false, default value true.
:param expand: bool OPTIONAL: With expanded data or not
:return:
"""
params = {}
if skip_archived is not None:
params['skipArchived'] = skip_archived
if expand is not None:
params['expand'] = expand
url = 'rest/tempo-accounts/1/account'
return self.get(url, params=params)
def tempo_account_get_accounts_by_jira_project(self, project_id):
"""
Get Accounts by JIRA Project. The Caller must have the Browse Account permission for Account.
This will return Accounts for which the Caller has Browse Account Permission for.
:param project_id: str the project id.
:return:
"""
url = 'rest/tempo-accounts/1/account/project/{}'.format(project_id)
return self.get(url)
def tempo_account_associate_with_jira_project(self, account_id, project_id,
default_account=False,
link_type='MANUAL'):
"""
The AccountLinkBean for associate Account with project
Adds a link to an Account.
{
scopeType:PROJECT
defaultAccount:boolean
linkType:IMPORTED | MANUAL
name:string
key:string
accountId:number
scope:number
id:number
}
:param project_id:
:param account_id
:param default_account
:param link_type
:return:
"""
data = {}
if account_id:
data['accountId'] = account_id
if default_account:
data['defaultAccount'] = default_account
if link_type:
data['linkType'] = link_type
if project_id:
data['scope'] = project_id
data['scopeType'] = 'PROJECT'
url = 'rest/tempo-accounts/1/link/'
return self.post(url, data=data)
def tempo_account_add_account(self, data=None):
"""
Creates Account, adding new Account requires the Manage Accounts Permission.
:param data: String then it will convert to json
:return:
"""
url = 'rest/tempo-accounts/1/account/'
if data is None:
return """Please, provide data e.g.
{name: "12312312321",
key: "1231231232",
lead: {name: "myusername"},
}
detail info: http://developer.tempo.io/doc/accounts/api/rest/latest/#-700314780
"""
return self.post(url, data=data)
def tempo_account_delete_account_by_id(self, account_id):
"""
Delete an Account by id. Caller must have the Manage Account Permission for the Account.
The Account can not be deleted if it has an AccountLinkBean.
:param account_id: the id of the Account to be deleted.
:return:
"""
url = 'rest/tempo-accounts/1/account/{id}/'.format(id=account_id)
return self.delete(url)
def tempo_account_get_rate_table_by_account_id(self, account_id):
"""
Returns a rate table for the specified account.
:param account_id: the account id.
:return:
"""
params = {'scopeType': "ACCOUNT", 'scopeId': account_id}
url = 'rest/tempo-accounts/1/ratetable'
return self.get(url, params=params)
def tempo_account_get_all_account_by_customer_id(self, customer_id):
"""
Get un-archived Accounts by customer. The Caller must have the Browse Account permission for the Account.
:param customer_id: the Customer id.
:return:
"""
url = 'rest/tempo-accounts/1/account/customer/{customerId}/'.format(customerId=customer_id)
return self.get(url)
def tempo_account_get_customers(self, query=None, count_accounts=None):
"""
Gets all Attributes, or only those whose key or name contains a specific substring.
An Attribute can be a Category or a Customer.
:param query: OPTIONAL: query for search
:param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer
:return: list of customers
"""
params = {}
if query is not None:
params['query'] = query
if count_accounts is not None:
params['countAccounts'] = count_accounts
url = 'rest/tempo-accounts/1/customer'
return self.get(url, params=params)
def tempo_account_add_new_customer(self, key, name):
"""
Adds a new Customer (an Account Attribute identified by key and name).
:param key:
:param name:
:return: on error, details are written to the error log (e.g. validation failures); on success, the created Customer is returned.
"""
data = {'name': name, 'key': key}
url = 'rest/tempo-accounts/1/customer'
return self.post(url, data=data)
def tempo_account_add_customer(self, data=None):
"""
Adds a new Customer from a full data payload (see the fallback message below for the expected fields).
:param data:
:return: on error, details are written to the error log (e.g. validation failures); on success, the created Customer is returned.
"""
if data is None:
return """Please, set the data as { isNew:boolean
name:string
key:string
id:number } or you can put only name and key parameters"""
url = 'rest/tempo-accounts/1/customer'
return self.post(url, data=data)
def tempo_account_get_customer_by_id(self, customer_id=1):
"""
Get Account Attribute whose key or name contain a specific substring. Attribute can be a Category or Customer.
:param customer_id: id of Customer record
:return: Customer info
"""
url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id)
return self.get(url)
def tempo_account_update_customer_by_id(self, customer_id=1, data=None):
"""
Updates an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer.
:param customer_id: id of Customer record
:param data: format is
{
isNew:boolean
name:string
key:string
id:number
}
:return: json with parameters name, key and id.
"""
if data is None:
return """Please, set the data as { isNew:boolean
name:string
key:string
id:number }"""
url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id)
return self.put(url, data=data)
def tempo_account_delete_customer_by_id(self, customer_id=1):
"""
Delete an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer.
:param customer_id: id of Customer record
:return: Customer info
"""
url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id)
return self.delete(url)
def tempo_account_export_accounts(self):
"""
Get csv export file of Accounts from Tempo
:return: csv file
"""
headers = self.form_token_headers
url = 'rest/tempo-accounts/1/export'
return self.get(url, headers=headers, not_json_response=True)
def tempo_holiday_get_schemes(self):
"""
Provide a holiday schemes
:return:
"""
url = 'rest/tempo-core/2/holidayschemes/'
return self.get(url)
def tempo_holiday_get_scheme_info(self, scheme_id):
"""
Provide a holiday scheme
:return:
"""
url = 'rest/tempo-core/2/holidayschemes/{}'.format(scheme_id)
return self.get(url)
def tempo_holiday_get_scheme_members(self, scheme_id):
"""
Provide a holiday scheme members
:return:
"""
url = 'rest/tempo-core/2/holidayschemes/{}/members'.format(scheme_id)
return self.get(url)
def tempo_holiday_put_into_scheme_member(self, scheme_id, username):
"""
Provide a holiday scheme
:return:
"""
url = 'rest/tempo-core/2/holidayschemes/{}/member/{}/'.format(scheme_id, username)
data = {'id': scheme_id}
return self.put(url, data=data)
def tempo_holiday_scheme_set_default(self, scheme_id):
"""
Set as default the holiday scheme
:param scheme_id:
:return:
"""
# @deprecated available in private mode the 1 version
# url = 'rest/tempo-core/1/holidayscheme/setDefault/{}'.format(scheme_id)
url = 'rest/tempo-core/2/holidayscheme/setDefault/{}'.format(scheme_id)
data = {'id': scheme_id}
return self.post(url, data=data)
def tempo_workload_scheme_get_members(self, scheme_id):
"""
Return the members of a workload scheme.
:param scheme_id:
:return:
"""
url = 'rest/tempo-core/1/workloadscheme/users/{}'.format(scheme_id)
return self.get(url)
def tempo_workload_scheme_set_member(self, scheme_id, member):
"""
Assign a user to a workload scheme.
:param member: user name of user
:param scheme_id:
:return:
"""
url = 'rest/tempo-core/1/workloadscheme/user/{}'.format(member)
data = {'id': scheme_id}
return self.put(url, data=data)
def tempo_timesheets_get_configuration(self):
"""
Return the Tempo timesheets configuration.
:return:
"""
url = 'rest/tempo-timesheets/3/private/config/'
return self.get(url)
def tempo_timesheets_get_team_utilization(self, team_id, date_from, date_to=None, group_by=None):
"""
Get team utilization. Response is JSON.
:param team_id:
:param date_from:
:param date_to:
:param group_by:
:return:
"""
url = 'rest/tempo-timesheets/3/report/team/{}/utilization'.format(team_id)
params = {'dateFrom': date_from,
'dateTo': date_to}
if group_by:
params['groupBy'] = group_by
return self.get(url, params=params)
def tempo_timesheets_get_worklogs(self, date_from=None, date_to=None, username=None, project_key=None,
account_key=None, team_id=None):
"""
:param date_from: yyyy-MM-dd
:param date_to: yyyy-MM-dd
:param username: name of the user you wish to get the worklogs for
:param project_key: key of a project you wish to get the worklogs for
:param account_key: key of an account you wish to get the worklogs for
:param team_id: id of the Team you wish to get the worklogs for
:return:
"""
params = {}
if date_from:
params['dateFrom'] = date_from
if date_to:
params['dateTo'] = date_to
if username:
params['username'] = username
if project_key:
params['projectKey'] = project_key
if account_key:
params['accountKey'] = account_key
if team_id:
params['teamId'] = team_id
url = 'rest/tempo-timesheets/3/worklogs/'
return self.get(url, params=params)
def tempo_4_timesheets_find_worklogs(self, **params):
"""
Find existing worklogs with searching parameters.
NOTE: check if you are using correct types for the parameters!
:param from: string From Date
:param to: string To Date
:param worker: Array of strings
:param taskId: Array of integers
:param taskKey: Array of strings
:param projectId: Array of integers
:param projectKey: Array of strings
:param teamId: Array of integers
:param roleId: Array of integers
:param accountId: Array of integers
:param accountKey: Array of strings
:param filterId: Array of integers
:param customerId: Array of integers
:param categoryId: Array of integers
:param categoryTypeId: Array of integers
:param epicKey: Array of strings
:param updatedFrom: string
:param includeSubtasks: boolean
:param pageNo: integer
:param maxResults: integer
:param offset: integer
"""
url = "rest/tempo-timesheets/4/worklogs/search"
return self.post(url, data=params)
def tempo_timesheets_get_worklogs_by_issue(self, issue):
"""
Get Tempo timesheet worklog by issue key or id.
:param issue: Issue key or Id
:return:
"""
url = "rest/tempo-timesheets/4/worklogs/jira/issue/{issue}".format(issue=issue)
return self.get(url)
def tempo_timesheets_write_worklog(self, worker, started, time_spend_in_seconds, issue_id, comment=None):
"""
Log work for user
:param worker:
:param started:
:param time_spend_in_seconds:
:param issue_id:
:param comment:
:return:
"""
data = {"worker": worker,
"started": started,
"timeSpentSeconds": time_spend_in_seconds,
"originTaskId": str(issue_id)}
if comment:
data['comment'] = comment
url = 'rest/tempo-timesheets/4/worklogs/'
return self.post(url, data=data)
def tempo_timesheets_approval_worklog_report(self, user_key, period_start_date):
"""
Return timesheets for approval
:param user_key:
:param period_start_date:
:return:
"""
url = "rest/tempo-timesheets/4/timesheet-approval/current"
params = {}
if period_start_date:
params['periodStartDate'] = period_start_date
if user_key:
params['userKey'] = user_key
return self.get(url, params=params)
def tempo_timesheets_get_required_times(self, from_date, to_date, user_name):
"""
Return the required working time for a user between two dates.
:param from_date:
:param to_date:
:param user_name:
:return:
"""
url = 'rest/tempo-timesheets/3/private/days'
params = {}
if from_date:
params['from'] = from_date
if to_date:
params['to'] = to_date
if user_name:
params['user'] = user_name
return self.get(url, params=params)
def tempo_timesheets_approval_status(self, period_start_date, user_name):
url = 'rest/tempo-timesheets/4/timesheet-approval/approval-statuses'
params = {}
if user_name:
params['userKey'] = user_name
if period_start_date:
params['periodStartDate'] = period_start_date
return self.get(url, params=params)
def tempo_get_links_to_project(self, project_id):
"""
Gets all links to a specific project
:param project_id:
:return:
"""
url = 'rest/tempo-accounts/1/link/project/{}/'.format(project_id)
return self.get(url)
def tempo_get_default_link_to_project(self, project_id):
"""
Gets the default link to a specific project
:param project_id:
:return:
"""
url = 'rest/tempo-accounts/1/link/project/{}/default/'.format(project_id)
return self.get(url)
def tempo_teams_get_all_teams(self, expand=None):
url = "rest/tempo-teams/2/team"
params = {}
if expand:
params['expand'] = expand
return self.get(url, params=params)
def tempo_teams_add_member(self, team_id, member_key):
"""
Add team member
:param team_id:
:param member_key: user_name or user_key of Jira
:return:
"""
data = {"member": {"key": str(member_key), "type": "USER"},
"membership": {"availability": "100",
"role": {"id": 1}}
}
return self.tempo_teams_add_member_raw(team_id, member_data=data)
def tempo_teams_add_membership(self, team_id, member_id):
"""
Add team member
:param team_id:
:param member_id:
:return:
"""
data = {"teamMemberId": member_id,
"teamId": team_id,
"availability": "100",
"role": {"id": 1}
}
url = "rest/tempo-teams/2/team/{}/member/{}/membership".format(team_id, member_id)
return self.post(url, data=data)
def tempo_teams_add_member_raw(self, team_id, member_data):
"""
Add team member
:param team_id:
:param member_data:
:return:
"""
url = 'rest/tempo-teams/2/team/{}/member/'.format(team_id)
data = member_data
return self.post(url, data=data)
def tempo_teams_get_members(self, team_id):
"""
Get members from team
:param team_id:
:return:
"""
url = 'rest/tempo-teams/2/team/{}/member/'.format(team_id)
return self.get(url)
def tempo_teams_remove_member(self, team_id, member_id, membership_id):
"""
Remove team membership
:param team_id:
:param member_id:
:param membership_id:
:return:
"""
url = 'rest/tempo-teams/2/team/{}/member/{}/membership/{}'.format(team_id, member_id, membership_id)
return self.delete(url)
def tempo_teams_update_member_information(self, team_id, member_id, membership_id, data):
"""
Update team membership attribute info
:param team_id:
:param member_id:
:param membership_id:
:param data:
:return:
"""
url = 'rest/tempo-teams/2/team/{}/member/{}/membership/{}'.format(team_id, member_id, membership_id)
return self.put(url, data=data)
def tempo_timesheets_get_period_configuration(self):
return self.get('rest/tempo-timesheets/3/period-configuration')
def tempo_timesheets_get_private_configuration(self):
return self.get('rest/tempo-timesheets/3/private/config')
def tempo_teams_get_memberships_for_member(self, username):
return self.get('rest/tempo-teams/2/user/{}/memberships'.format(username))
"""
#######################################################################
# Agile(Formerly Greenhopper) REST API implements #
#######################################################################
"""
def get_all_agile_boards(self, board_name=None, project_key=None, board_type=None, start=0, limit=50):
"""
Returns all boards. This only includes boards that the user has permission to view.
:param board_name:
:param project_key:
:param board_type:
:param start:
:param limit:
:return:
"""
url = 'rest/agile/1.0/board'
params = {}
if board_name:
params['name'] = board_name
if project_key:
params['projectKeyOrId'] = project_key
if board_type:
params['type'] = board_type
if start:
params['startAt'] = int(start)
if limit:
params['maxResults'] = int(limit)
return self.get(url, params=params)
def get_agile_board(self, board_id):
"""
Get agile board info by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.get(url)
def create_agile_board(self, name, type, filter_id, location=None):
"""
Create an agile board
:param name: str
:param type: str, scrum or kanban
:param filter_id: int
:param location: dict, Optional. Default is user
"""
data = {'name': name,
'type': type,
'filterId': filter_id}
if location:
data['location'] = location
else:
data['location'] = {'type': 'user'}
url = 'rest/agile/1.0/board'
return self.post(url, data=data)
def get_agile_board_by_filter_id(self, filter_id):
"""
Gets an agile board by the filter id
:param filter_id: int, str
"""
url = 'rest/agile/1.0/board/filter/{filter_id}'.format(filter_id=filter_id)
return self.get(url)
def get_agile_board_configuration(self, board_id):
"""
Get the board configuration. The response contains the following fields:
id - Id of the board.
name - Name of the board.
filter - Reference to the filter used by the given board.
subQuery (Kanban only) - JQL subquery used by the given board.
columnConfig - The column configuration lists the columns for the board,
in the order defined in the column configuration. For each column,
it shows the issue status mapping as well as the constraint type
(Valid values: none, issueCount, issueCountExclSubs) for
the min/max number of issues. Note, the last column with statuses
mapped to it is treated as the "Done" column, which means that issues
in that column will be marked as already completed.
estimation (Scrum only) - Contains information about type of estimation used for the board.
Valid values: none, issueCount, field. If the estimation type is "field",
the Id and display name of the field used for estimation is also returned.
Note, estimates for an issue can be updated by a PUT /rest/api/2/issue/{issueIdOrKey}
request, however the fields must be on the screen. "timeoriginalestimate" field will never be
on the screen, so in order to update it "originalEstimate" in "timetracking" field should be updated.
ranking - Contains information about custom field used for ranking in the given board.
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}/configuration'.format(str(board_id))
return self.get(url)
def get_issues_for_backlog(self, board_id):
"""
:param board_id: int, str
"""
url = 'rest/agile/1.0/{board_id}/backlog'.format(board_id=board_id)
return self.get(url)
def delete_agile_board(self, board_id):
"""
Delete agile board by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.delete(url)
def get_agile_board_properties(self, board_id):
"""
Gets a list of all the board properties
:param board_id: int, str
"""
url = 'rest/agile/1.0/board/{board_id}/properties'.format(board_id=board_id)
return self.get(url)
def get_all_sprint(self, board_id, state=None, start=0, limit=50):
"""
Returns all sprints from a board, for a given board Id.
This only includes sprints that the user has permission to view.
:param board_id:
:param state: Filters results to sprints in specified states.
Valid values: future, active, closed.
You can define multiple states separated by commas, e.g. state=active,closed
:param start: The starting index of the returned sprints.
Base index: 0.
See the 'Pagination' section at the top of this page for more details.
:param limit: The maximum number of sprints to return per page.
Default: 50.
See the 'Pagination' section at the top of this page for more details.
:return:
"""
params = {}
if start:
params['startAt'] = start
if limit:
params['maxResults'] = limit
if state:
params['state'] = state
url = 'rest/agile/1.0/board/{boardId}/sprint'.format(boardId=board_id)
return self.get(url, params=params)
def get_sprint(self, sprint_id):
"""
Returns the sprint for a given sprint Id.
The sprint will only be returned if the user can view the board that the sprint was created on,
or view at least one of the issues in the sprint.
:param sprint_id:
:return:
"""
url = 'rest/agile/1.0/sprint/{sprintId}'.format(sprintId=sprint_id)
return self.get(url)
def rename_sprint(self, sprint_id, name, start_date, end_date):
"""
:param sprint_id:
:param name:
:param start_date:
:param end_date:
:return:
"""
return self.put('rest/greenhopper/1.0/sprint/{0}'.format(sprint_id), data={
'name': name,
'startDate': start_date,
'endDate': end_date})
def delete_sprint(self, sprint_id):
"""
Deletes a sprint.
Once a sprint is deleted, all issues in the sprint will be moved to the backlog.
Note, only future sprints can be deleted.
:param sprint_id:
:return:
"""
return self.delete('rest/agile/1.0/sprint/{sprintId}'.format(sprintId=sprint_id))
def update_partially_sprint(self, sprint_id, data):
"""
Performs a partial update of a sprint.
A partial update means that fields not present in the request JSON will not be updated.
Notes:
Sprints that are in a closed state cannot be updated.
A sprint can be started by updating the state to 'active'.
This requires the sprint to be in the 'future' state and have a startDate and endDate set.
A sprint can be completed by updating the state to 'closed'.
This action requires the sprint to be in the 'active' state.
This sets the completeDate to the time of the request.
Other changes to state are not allowed.
The completeDate field cannot be updated manually.
:param sprint_id:
:param data: { "name": "new name"}
:return:
"""
return self.post('rest/agile/1.0/sprint/{}'.format(sprint_id), data=data)
def get_sprint_issues(self, sprint_id, start, limit):
"""
Returns all issues in a sprint, for a given sprint Id.
This only includes issues that the user has permission to view.
By default, the returned issues are ordered by rank.
:param sprint_id:
:param start: The starting index of the returned issues.
Base index: 0.
See the 'Pagination' section at the top of this page for more details.
:param limit: The maximum number of issues to return per page.
Default: 50.
See the 'Pagination' section at the top of this page for more details.
Note, the total number of issues returned is limited by the property
'jira.search.views.default.max' in your Jira instance.
If you exceed this limit, your results will be truncated.
:return:
"""
params = {}
if start:
params['startAt'] = start
if limit:
params['maxResults'] = limit
url = 'rest/agile/1.0/sprint/{sprintId}/issue'.format(sprintId=sprint_id)
return self.get(url, params=params)
def update_rank(self, issues_to_rank, rank_before, customfield_number):
"""
Updates the rank of issues (max 50), placing them before a given issue.
:param issues_to_rank: List of issues to rank (max 50)
:param rank_before: Issue that the issues will be put over
:param customfield_number: The number of the custom field Rank
:return:
"""
return self.put('rest/agile/1.0/issue/rank', data={
'issues': issues_to_rank,
'rankBeforeIssue': rank_before,
'rankCustomFieldId': customfield_number})
def health_check(self):
"""
Get health status
https://confluence.atlassian.com/jirakb/how-to-retrieve-health-check-results-using-rest-api-867195158.html
:return:
"""
# check as Troubleshooting & Support Tools Plugin
response = self.get('rest/troubleshooting/1.0/check/')
if not response:
# check as support tools
response = self.get('rest/supportHealthCheck/1.0/check/')
return response
# Audit Records
def get_audit_records(self, offset=None, limit=None, filter=None, from_date=None, to_date=None):
"""
Returns auditing records filtered using provided parameters
:param offset: the number of record from which search starts
:param limit: maximum number of returned results (if limit is <= 0 or > 1000,
it will be set to the default value: 1000)
:param filter: text query; each returned record must contain
the provided text in one of its fields
:param from_date: string - timestamp in the past; 'from' must be less than or equal to 'to',
otherwise the result set will be empty; only records created
at or after the 'from' timestamp will be included in the response
:param to_date: string - timestamp in the past; 'from' must be less than or equal to 'to',
otherwise the result set will be empty; only records created
at or before the 'to' timestamp will be included in the response
:return:
"""
params = {}
if offset:
params["offset"] = offset
if limit:
params["limit"] = limit
if filter:
params["filter"] = filter
if from_date:
params["from"] = from_date
if to_date:
params["to"] = to_date
url = "rest/api/2/auditing/record"
return self.get(url, params=params) or {}
def post_audit_record(self, audit_record):
"""
Store a record in Audit Log
:param audit_record: json with compat https://docs.atlassian.com/jira/REST/schema/audit-record#
:return:
"""
url = "rest/api/2/auditing/record"
return self.post(url, data=audit_record)
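# --- Hedged usage sketch (not part of the original client code) ---
# Assumes these methods live on the `Jira` client of the atlassian-python-api
# package; the class name, URL, credentials and dates below are illustrative
# assumptions rather than values taken from this file.
if __name__ == '__main__':
    from atlassian import Jira

    jira = Jira(url='https://jira.example.com',
                username='user', password='secret')
    # List agile boards the user can see, then pull Tempo worklogs for one user.
    boards = jira.get_all_agile_boards(limit=10)
    worklogs = jira.tempo_timesheets_get_worklogs(
        date_from='2020-01-01', date_to='2020-01-31', username='jdoe')
    print(boards, worklogs)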
|
var assert = require("assert");
var CONST = require("../js/common/constants.js");
var obj = require("../js/common/gameStats.js");
describe('Game stats', function() {
describe('batman', function () {
it('total should return 0 when no values set', function () {
assert.equal(obj.batman.total, '0');
});
it('total should return 50 when only first level has score of 50', function () {
obj.batman.jumping.score += 50;
assert.equal(obj.batman.total, '50');
});
it('first level should return score correctly', function () {
assert.equal(obj.batman.jumping.score, '50');
});
it('total should return 150 when each level has score of 50', function () {
obj.batman.logic.score += 50;
obj.batman.asteroid.score += 50;
assert.equal(obj.batman.total, '150');
});
});
describe('superman', function () {
it('total should return 0 when no values set', function () {
assert.equal(obj.superman.total, '0');
});
it('total should return 50 when only first level has score of 50', function () {
obj.superman.jumping.score += 50;
assert.equal(obj.superman.total, '50');
});
it('total should return 150 when each level has score of 50', function () {
obj.superman.logic.score += 50;
obj.superman.asteroid.score += 50;
assert.equal(obj.superman.total, '150');
});
it('third level should return score correctly', function () {
assert.equal(obj.superman.logic.score, '50');
});
});
});
describe('CONST', function() {
it('world height must be greater than 0', function () {
assert.equal(CONST.game.world.height > 0, true);
});
it('world width must be greater than 0', function () {
assert.equal(CONST.game.world.width > 0, true);
});
it('world height must be less than width', function () {
assert.equal(CONST.game.world.width > CONST.game.world.height, true);
});
it('player\'s initial lives should be equal to 5' , function () {
assert.equal(CONST.player.initialLives, 5);
});
it('direction.left returns right value', function () {
assert.equal(CONST.direction.left, 'left');
});
it('direction.right returns right value', function () {
assert.equal(CONST.direction.right, 'right');
});
}); |
import { createMuiTheme } from '@material-ui/core/styles'
const theme = createMuiTheme({
typography: {
useNextVariants: true,
},
palette: {
primary: {
light: '#bee3b1',
main: '#86b376',
dark: '#4f7841',
contrastText: '#d5f1f5',
},
secondary: {
light: '#e39b96',
main: '#fc42ff',
dark: '#7a150d',
contrastText: '#d5f1f5',
},
openTitle: '#629c9a',
protectedTitle: '#fc42ff',
type: 'light'
}
})
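// Hedged usage sketch (illustrative, not part of this module): the theme is
// normally passed to Material-UI's provider at the app root, e.g.
//
//   import { MuiThemeProvider } from '@material-ui/core/styles'
//   import theme from './theme' // import path is an assumption
//
//   const App = ({ children }) => (
//     <MuiThemeProvider theme={theme}>{children}</MuiThemeProvider>
//   )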
export default theme |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, ModuleList
class ConvUpsample(BaseModule):
"""ConvUpsample performs 2x upsampling after Conv.
There are several `ConvModule` layers. In the first few layers, upsampling
is applied after each convolution. The number of upsampling operations
must be no more than the number of ConvModule layers.
Args:
in_channels (int): Number of channels in the input feature map.
inner_channels (int): Number of channels produced by the convolution.
num_layers (int): Number of convolution layers.
num_upsample (int, optional): Number of upsampling layers. Must be no
more than num_layers. Upsampling will be applied after the first
``num_upsample`` convolution layers. Default: ``num_layers``.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict): Config dict for initialization. Default: None.
kwargs (keyword arguments): Other arguments used in ConvModule.
"""
def __init__(self,
in_channels,
inner_channels,
num_layers=1,
num_upsample=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
super(ConvUpsample, self).__init__(init_cfg)
if num_upsample is None:
num_upsample = num_layers
assert num_upsample <= num_layers, \
f'num_upsample({num_upsample}) must be no more than ' \
f'num_layers({num_layers})'
self.num_layers = num_layers
self.num_upsample = num_upsample
self.conv = ModuleList()
for i in range(num_layers):
self.conv.append(
ConvModule(
in_channels,
inner_channels,
3,
padding=1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
in_channels = inner_channels
def forward(self, x):
num_upsample = self.num_upsample
for i in range(self.num_layers):
x = self.conv[i](x)
if num_upsample > 0:
num_upsample -= 1
x = F.interpolate(
x, scale_factor=2, mode='bilinear', align_corners=False)
return x
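# --- Hedged usage sketch (not part of the original module) ---
# The channel sizes and input tensor below are illustrative assumptions; they
# only demonstrate that two upsampling steps turn a 16x16 map into 64x64.
if __name__ == '__main__':
    import torch

    conv_upsample = ConvUpsample(
        in_channels=256,
        inner_channels=128,
        num_layers=2,
        num_upsample=2,
        norm_cfg=dict(type='BN'))
    feats = torch.randn(1, 256, 16, 16)
    out = conv_upsample(feats)
    print(out.shape)  # expected: torch.Size([1, 128, 64, 64])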
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from utils import paddle_includes, extra_compile_args
from paddle.utils.cpp_extension import CUDAExtension, setup
setup(
name='simple_setup_relu2',
ext_modules=[
CUDAExtension(
name='simple_setup_relu2',
sources=['relu_op_simple.cc', 'relu_op_simple.cu'],
include_dirs=paddle_includes,
extra_compile_args=extra_compile_args)
])
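# Hedged usage note (not part of the original setup script): after building
# and installing the extension, e.g. with `python setup.py install`, the
# module named above can be imported as `import simple_setup_relu2`. The
# name of the operator it exposes depends on the C++/CUDA sources and is not
# shown here, so no call is illustrated.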
|
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
import os
import yaml
import getpass
from escat.utils import get_nested_config_values
def get_config_from_file(cluster_name: str, config_file: str):
with open(config_file, 'r') as _config_file:
config: dict = yaml.safe_load(_config_file)['clusters'][cluster_name]
if 'auth' in config:
if config['auth']['password']['ask']:
config['auth']['password'] = getpass.getpass('Enter password: ')
else:
config['auth']['password'] = config['auth']['password']['value']
return config
def get_es_client(cluster_name: str, config_file: str):
config = get_config_from_file(cluster_name, config_file)
return Elasticsearch(
hosts=config['hosts'],
http_auth=(get_nested_config_values(config.get('auth'), 'username', ''),
get_nested_config_values(config.get('auth'), 'password', '')),
use_ssl=get_nested_config_values(config.get('ssl'), 'enabled', False),
ca_certs=get_nested_config_values(config.get('ssl'), 'ca_certs', []),
client_cert=get_nested_config_values(config.get('ssl'), 'cert', ""),
client_key=get_nested_config_values(config.get('ssl'), 'key', "")
)
def parse_command_line_args(command_list, args):
home = os.path.expanduser('~')
default_config = os.path.join(home, 'escat.yml')
argument_parser = ArgumentParser(description='Command line tools for management of Elasticsearch Clusters')
argument_parser.add_argument('-c', '--cluster', help='The config profile to use', default='default', type=str)
argument_parser.add_argument('--config', help='Path to config file', default=default_config)
argument_parser.add_argument('module', choices=command_list)
argument_parser.add_argument('-v', '--verbose', help='Whether to print output with headers', action='store_true', default=False)
argument_parser.add_argument('-f', '--format', choices=['json', 'text'], default='text')
argument_parser.add_argument('-t', '--headers', type=str, help='Comma separated list of headers to return')
argument_parser.add_argument('-b', '--bytes', choices=['b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'],
help='Which format to display the byte metrics in. Only valid for the recovery module')
argument_parser.add_argument('-i', '--indices', help='Comma separated list of indices', type=str)
argument_parser.add_argument('-a', '--aliases', help='Comma separated list of aliases', type=str)
argument_parser.add_argument('--fields', help='Comma separated list of fields', type=str)
argument_parser.add_argument('--thread-pool-patterns', help='Comma separated list of regex of required thread pool patterns', type=str)
argument_parser.add_argument('--repo', '--snapshot-repo', help='Name of the repository whose snapshots are queried', type=str)
argument_parser.add_argument('--template', '--template-name', help='Name of the template to lookup', type=str)
return argument_parser.parse_args(args)
def get_common_cat_api_params(namespace):
params = {}
verbose = namespace.verbose
if verbose is not None:
params['v'] = str(verbose).lower()
headers = namespace.headers
if headers is not None:
params['h'] = headers
b = namespace.bytes
if b is not None:
params['bytes'] = b
params['format'] = namespace.format
return params
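# --- Hedged usage sketch (not part of the original module) ---
# The command list below is an illustrative assumption; the real list of
# supported modules is defined by the calling code, not by this file.
if __name__ == '__main__':
    import sys

    namespace = parse_command_line_args(
        ['health', 'nodes', 'indices', 'shards'], sys.argv[1:])
    client = get_es_client(namespace.cluster, namespace.config)
    cat_params = get_common_cat_api_params(namespace)
    # elasticsearch-py cat API methods accept a per-request params dict.
    print(client.cat.health(params=cat_params))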
|
#pragma once
#include "backend.h"
#include <util/generic/ptr.h>
#include <functional>
class TThreadedLogBackend: public TLogBackend {
public:
TThreadedLogBackend(TLogBackend* slave);
TThreadedLogBackend(TLogBackend* slave, size_t queuelen, std::function<void()> queueOverflowCallback = {});
~TThreadedLogBackend() override;
void WriteData(const TLogRecord& rec) override;
void ReopenLog() override;
void ReopenLogNoFlush() override;
size_t QueueSize() const override;
// Write an emergency message when the memory allocator is corrupted.
// The TThreadedLogBackend object can't be used after this method is called.
void WriteEmergencyData(const TLogRecord& rec);
private:
class TImpl;
THolder<TImpl> Impl_;
};
class TOwningThreadedLogBackend: private THolder<TLogBackend>, public TThreadedLogBackend {
public:
TOwningThreadedLogBackend(TLogBackend* slave);
TOwningThreadedLogBackend(TLogBackend* slave, size_t queuelen, std::function<void()> queueOverflowCallback = {});
~TOwningThreadedLogBackend() override;
};
|
import { resetReviewForm } from './reviewForm'
import { getConcerts } from '../concerts/concerts'
// synchronous actions
export const setReviews = reviews => {
return {
type: "SET_REVIEWS",
reviews
}
}
export const setUserReviews = reviews => {
return {
type: "SET_USER_REVIEWS",
reviews
}
}
export const setConcertReviews = reviews => {
return {
type: "SET_CONCERT_REVIEWS",
reviews
}
}
export const clearReviews = () => {
return {
type: "CLEAR_REVIEWS"
}
}
export const addReview = review => {
return {
type: "ADD_REVIEW",
review
}
}
export const deleteReviewSuccess = reviewId => {
return {
type: "DELETE_REVIEW",
reviewId
}
}
export const updateReviewSuccess = review => {
return {
type: "UPDATE_REVIEW",
review
}
}
// async actions
export const getReviews = () => {
return dispatch => {
return fetch(`https://moss-backend.herokuapp.com/api/v1/reviews`, {
credentials: "include",
method: "GET",
headers: {
"Content-Type": "application/json"
},
})
.then(r => r.json())
.then(response => {
if (response.error) {
alert(response.error)
} else {
dispatch(setReviews(response.data))
}
})
.catch(console.log)
}
}
export const getConcertReviews = (concertId) => {
return dispatch => {
return fetch(`https://moss-backend.herokuapp.com/api/v1/concerts/${concertId}/reviews`, {
credentials: "include",
method: "GET",
headers: {
"Content-Type": "application/json"
},
})
.then(r => r.json())
.then(response => {
if (response.error) {
alert(response.error)
} else {
dispatch(setConcertReviews(response.data))
}
})
.catch(console.log)
}
}
export const getUserReviews = (userId) => {
return dispatch => {
return fetch(`https://moss-backend.herokuapp.com/api/v1/users/${userId}/reviews`, {
credentials: "include",
method: "GET",
headers: {
"Content-Type": "application/json"
},
})
.then(r => r.json())
.then(response => {
if (response.error) {
alert(response.error)
} else {
dispatch(setUserReviews(response.data))
}
})
.catch(console.log)
}
}
export const createReview = (reviewData, history) => {
return dispatch => {
const sendableReviewData = {
venue_score :reviewData.venue_score,
sound_score :reviewData.sound_score,
performance_score :reviewData.performance_score,
set_score :reviewData.set_score,
price :reviewData.price,
write_up :reviewData.write_up,
user_id :reviewData.userId,
concert_id: reviewData.concertId
}
return fetch("https://moss-backend.herokuapp.com/api/v1/reviews", {
credentials: "include",
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify(sendableReviewData)
})
.then(r => r.json())
.then(resp => {
if (resp.error) {
alert(resp.error)
} else {
console.log(resp.data)
dispatch(addReview(resp.data))
dispatch(getConcerts(resp.data.attributes.concert.artist))
dispatch(resetReviewForm())
history.push(`/reviews/${resp.data.id}`)
}
})
.catch(console.log)
}
}
export const updateReview = (reviewData, history) => {
return dispatch => {
const sendableReviewData = {
venue_score :reviewData.venue_score,
sound_score :reviewData.sound_score,
performance_score :reviewData.performance_score,
set_score :reviewData.set_score,
price :reviewData.price,
write_up :reviewData.write_up,
user_id :reviewData.userId,
concert_id :reviewData.concertId
}
return fetch(`https://moss-backend.herokuapp.com/api/v1/reviews/${reviewData.reviewId}`, {
credentials: "include",
method: "PATCH",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify(sendableReviewData)
})
.then(r => r.json())
.then(resp => {
if (resp.error) {
alert(resp.error)
} else {
dispatch(updateReviewSuccess(resp.data))
dispatch(getConcertReviews(resp.data.attributes.concert.id))
dispatch(getConcerts(resp.data.attributes.concert.artist))
dispatch(resetReviewForm())
history.push(`/reviews/${resp.data.id}`)
}
})
.catch(console.log)
}
}
export const deleteReview = (reviewId, history, concertId, userId) => {
return dispatch => {
return fetch(`https://moss-backend.herokuapp.com/api/v1/reviews/${reviewId}`, {
credentials: "include",
method: "DELETE",
headers: {
"Content-Type": "application/json"
},
})
.then(r => r.json())
.then(resp => {
if (resp.error) {
alert(resp.error)
} else {
console.log(resp)
dispatch(deleteReviewSuccess(reviewId))
dispatch(getUserReviews(userId))
dispatch(getConcertReviews(concertId))
history.push(`/concerts/${concertId}`)
}
})
.catch(console.log)
}
}
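// --- Hedged usage sketch (not part of the original action creators) ---
// Assumes redux and redux-thunk are installed. The reducer and store below
// are minimal illustrative assumptions; the helper is exported but never
// called here, it only shows how these thunks are dispatched.
import { createStore, applyMiddleware } from 'redux'
import thunk from 'redux-thunk'

export const exampleStoreSetup = () => {
  const reviewsReducer = (state = [], action) =>
    action.type === "SET_REVIEWS" ? action.reviews : state
  const store = createStore(reviewsReducer, applyMiddleware(thunk))
  store.dispatch(getReviews())   // getReviews is defined above in this module
  return store
}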
|
/*------------------------------------------------------------------------
* filename - _isnan.c
*
* function(s)
*
* _isnan - double version
*
* These functions return non-zero if the value passed in is +-NAN, else 0.
* More specifically, the non-zero return value = _FPCLASS_SNAN if the NAN
* is of the signaling type, or _FPCLASS_QNAN if the NAN is of the quiet type.
*
*-----------------------------------------------------------------------*/
/*
* C/C++ Run Time Library - Version 10.0
*
* Copyright (c) 1998, 2000 by Inprise Corporation
* All Rights Reserved.
*
*/
/* $Revision: 9.1 $ */
#include <float.h>
#include <_float.h>
int _RTLENTRY _EXPFUNC _isnan (double _d)
{
_rtl_double_t *p = (_rtl_double_t *) &_d;
if ((p->i64.s.high32.value & 0x7FF00000) == 0x7FF00000)
if (((p->i64.s.high32.value & 0x000FFFFF) == 0) &&
(p->i64.s.low32.value == 0))
return 0;
else
{
if ((p->i64.s.high32.value & 0x00080000) == 0)
return _FPCLASS_SNAN; /* Signaling NAN returns 1 */
else
return _FPCLASS_QNAN; /* Quiet NAN returns 2 */
}
return 0;
}
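/* --- Hedged usage sketch (illustrative, not part of the original RTL source).
 * The macro guard is an assumption so the example never affects normal
 * builds of this translation unit. --- */
#ifdef ISNAN_USAGE_EXAMPLE
#include <stdio.h>
int main(void)
{
    double zero = 0.0;
    double nan_val = zero / zero;        /* produces a quiet NaN on IEEE-754 targets */
    printf("%d\n", _isnan(nan_val));     /* expected: _FPCLASS_QNAN (2) */
    printf("%d\n", _isnan(1.5));         /* expected: 0 */
    return 0;
}
#endif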
|
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, September 27, 2020 at 11:51:39 AM Mountain Standard Time
* Operating System: Version 14.0 (Build 18A373)
* Image Source: /System/Library/PrivateFrameworks/Transparency.framework/Transparency
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
@class TransparencySFAnalytics;
@interface TransparencyAnalytics : NSObject {
TransparencySFAnalytics* _sfanalytics;
}
@property (retain) TransparencySFAnalytics * sfanalytics; //@synthesize sfanalytics=_sfanalytics - In the implementation block
+(id)logger;
+(id)formatEventName:(id)arg1 application:(id)arg2 ;
+(unsigned long long)doKTResultWithAnalyticsForEventName:(id)arg1 error:(id*)arg2 block:(/*^block*/id)arg3 ;
+(BOOL)hasInternalDiagnostics;
+(BOOL)doWithAnalyticsForEventName:(id)arg1 error:(id*)arg2 block:(/*^block*/id)arg3 ;
-(void)logResultForEvent:(id)arg1 hardFailure:(BOOL)arg2 result:(id)arg3 ;
-(void)logResultForEvent:(id)arg1 hardFailure:(BOOL)arg2 result:(id)arg3 withAttributes:(id)arg4 ;
-(void)noteEventNamed:(id)arg1 ;
-(void)logSuccessForEventNamed:(id)arg1 ;
-(void)logHardFailureForEventNamed:(id)arg1 withAttributes:(id)arg2 ;
-(void)logSoftFailureForEventNamed:(id)arg1 withAttributes:(id)arg2 ;
-(void)removeMultiSamplerForName:(id)arg1 ;
-(void)logMetric:(id)arg1 withName:(id)arg2 ;
-(void)addMultiSamplerForName:(id)arg1 withTimeInterval:(double)arg2 block:(/*^block*/id)arg3 ;
-(void)setSfanalytics:(TransparencySFAnalytics *)arg1 ;
-(TransparencySFAnalytics *)sfanalytics;
@end
|
from typing import Tuple
from hypothesis import given
from symba.base import Expression
from ground.base import Context
from ground.hints import Multisegment
from tests.utils import (reverse_multisegment,
reverse_multisegment_coordinates)
from . import strategies
@given(strategies.contexts_with_multisegments)
def test_basic(context_with_multisegment: Tuple[Context, Multisegment]
) -> None:
context, multisegment = context_with_multisegment
result = context.multisegment_length(multisegment)
assert isinstance(result, Expression)
@given(strategies.contexts_with_multisegments)
def test_value(context_with_multisegment: Tuple[Context, Multisegment]
) -> None:
context, multisegment = context_with_multisegment
result = context.multisegment_length(multisegment)
assert all(result > context.segment_length(segment)
for segment in multisegment.segments)
@given(strategies.contexts_with_rational_multisegments)
def test_reversals(context_with_multisegment: Tuple[Context, Multisegment]
) -> None:
context, multisegment = context_with_multisegment
result = context.multisegment_length(multisegment)
assert result == context.multisegment_length(
reverse_multisegment(multisegment))
assert result == context.multisegment_length(
reverse_multisegment_coordinates(multisegment))
|
from collections import OrderedDict
from typing import Set, List, Optional, Union
from irrd.conf import PASSWORD_HASH_DUMMY_VALUE
from irrd.utils.pgp import get_gpg_instance
from .config import PASSWORD_HASHERS
from .fields import (RPSLTextField, RPSLIPv4PrefixField, RPSLIPv4PrefixesField, RPSLIPv6PrefixField,
RPSLIPv6PrefixesField, RPSLIPv4AddressRangeField, RPSLASNumberField, RPSLASBlockField,
RPSLSetNameField, RPSLEmailField, RPSLDNSNameField, RPSLGenericNameField, RPSLReferenceField,
RPSLReferenceListField, RPSLAuthField, RPSLRouteSetMembersField, RPSLChangedField)
from .parser import RPSLObject, UnknownRPSLObjectClassException
def rpsl_object_from_text(text, strict_validation=True, default_source: Optional[str]=None) -> RPSLObject:
rpsl_object_class = text.split(':', maxsplit=1)[0].strip()
try:
klass = OBJECT_CLASS_MAPPING[rpsl_object_class]
except KeyError:
raise UnknownRPSLObjectClassException(f'unknown object class: {rpsl_object_class}',
rpsl_object_class=rpsl_object_class)
return klass(from_text=text, strict_validation=strict_validation, default_source=default_source)
class RPSLAsBlock(RPSLObject):
fields = OrderedDict([
('as-block', RPSLASBlockField(primary_key=True, lookup_key=True)),
('descr', RPSLTextField(multiple=True, optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLAsSet(RPSLObject):
fields = OrderedDict([
('as-set', RPSLSetNameField(primary_key=True, lookup_key=True, prefix='AS')),
('descr', RPSLTextField(multiple=True, optional=True)),
('members', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['aut-num', 'as-set'], strong=False)),
('mbrs-by-ref', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['mntner'], allow_kw_any=True, strong=False)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLAutNum(RPSLObject):
fields = OrderedDict([
('aut-num', RPSLASNumberField(primary_key=True, lookup_key=True)),
('as-name', RPSLGenericNameField(allowed_prefixes=['AS'])),
('descr', RPSLTextField(multiple=True, optional=True)),
('member-of', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['as-set'], strong=False)),
('import', RPSLTextField(optional=True, multiple=True)),
('mp-import', RPSLTextField(optional=True, multiple=True)),
('import-via', RPSLTextField(optional=True, multiple=True)),
('export', RPSLTextField(optional=True, multiple=True)),
('mp-export', RPSLTextField(optional=True, multiple=True)),
('export-via', RPSLTextField(optional=True, multiple=True)),
('default', RPSLTextField(optional=True, multiple=True)),
('mp-default', RPSLTextField(optional=True, multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLDomain(RPSLObject):
fields = OrderedDict([
('domain', RPSLTextField(primary_key=True, lookup_key=True)), # reverse delegation address (range), v4/v6/enum
('descr', RPSLTextField(multiple=True, optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('zone-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('nserver', RPSLTextField(optional=True, multiple=True)), # DNS name, possibly followed v4/v6
('sub-dom', RPSLTextField(optional=True, multiple=True)),
('dom-net', RPSLTextField(optional=True, multiple=True)),
('refer', RPSLTextField(optional=True)), # ???
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLFilterSet(RPSLObject):
fields = OrderedDict([
('filter-set', RPSLSetNameField(primary_key=True, lookup_key=True, prefix='FLTR')),
('descr', RPSLTextField(multiple=True, optional=True)),
('filter', RPSLTextField()),
('mp-filter', RPSLTextField(optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLInetRtr(RPSLObject):
fields = OrderedDict([
('inet-rtr', RPSLDNSNameField(primary_key=True, lookup_key=True)),
('descr', RPSLTextField(multiple=True, optional=True)),
('alias', RPSLDNSNameField(optional=True, multiple=True)),
('local-as', RPSLASNumberField()),
('ifaddr', RPSLTextField(optional=True, multiple=True)),
('interface', RPSLTextField(optional=True, multiple=True)),
('peer', RPSLTextField(optional=True, multiple=True)),
('mp-peer', RPSLTextField(optional=True, multiple=True)),
('member-of', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['rtr-set'], strong=False)),
('rs-in', RPSLTextField(optional=True)),
('rs-out', RPSLTextField(optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLInet6Num(RPSLObject):
fields = OrderedDict([
('inet6num', RPSLIPv6PrefixField(primary_key=True, lookup_key=True)),
('netname', RPSLTextField()),
('descr', RPSLTextField(multiple=True, optional=True)),
('country', RPSLTextField(multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('rev-srv', RPSLTextField(optional=True, multiple=True)),
('status', RPSLTextField()),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLInetnum(RPSLObject):
fields = OrderedDict([
('inetnum', RPSLIPv4AddressRangeField(primary_key=True, lookup_key=True)),
('netname', RPSLTextField()),
('descr', RPSLTextField(multiple=True, optional=True)),
('country', RPSLTextField(multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('rev-srv', RPSLTextField(optional=True, multiple=True)),
('status', RPSLTextField()),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLKeyCert(RPSLObject):
fields = OrderedDict([
('key-cert', RPSLGenericNameField(primary_key=True, lookup_key=True)),
('method', RPSLTextField(optional=True)), # Fixed to PGP
('owner', RPSLTextField(optional=True, multiple=True)), # key owner, autogenerate
('fingerpr', RPSLTextField(optional=True)), # fingerprint, autogenerate
('certif', RPSLTextField(multiple=True)), # Actual key
('remarks', RPSLTextField(optional=True, multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
def clean(self) -> bool:
"""
Validate the PGP key and update relevant attributes.
In key-cert objects, the method, owner and fingerpr objects should be
auto-generated based on the certif object. The certif object should be
a valid PGP key, matching the ID in the primary key.
Note that the PGP key is imported into the keyring every time this is
called - this is intentional, to decouple the RPSL database state from
the gpg keyring state.
"""
if not super().clean():
return False # pragma: no cover
gpg = get_gpg_instance()
certif_data = '\n'.join(self.parsed_data.get('certif', [])).replace(',', '\n')
result = gpg.import_keys(certif_data)
if len(result.fingerprints) != 1:
msg = f'Unable to read public PGP key: key corrupt or multiple keys provided'
if result.results:
msg = f'{msg}: {result.results[0]["text"]}'
self.messages.error(msg)
return False
self.fingerprint = result.fingerprints[0]
expected_object_name = 'PGPKEY-' + self.fingerprint[-8:]
actual_object_name = self.parsed_data['key-cert'].upper()
fingerprint_formatted = self.format_fingerprint(self.fingerprint)
if expected_object_name != actual_object_name:
self.messages.error(
f'Invalid object name {actual_object_name}: does not match key fingerprint {fingerprint_formatted}, '
f'expected object name {expected_object_name}'
)
return False
self._update_attribute_value('fingerpr', fingerprint_formatted)
self._update_attribute_value('owner', gpg.list_keys(keys=self.fingerprint)[0]['uids'])
self._update_attribute_value('method', 'PGP')
return True
# This API is correct, but not very practical.
# In typical cases, the PGP key used to sign a message is not known until
# the PGP signature is actually parsed. More useful is a generic method to find
# which key signed a message, which can then be stored and compared to key-cert's later.
# This method will probably be extracted to the update handler.
def verify(self, message: str) -> bool:
gpg = get_gpg_instance()
result = gpg.verify(message)
return result.valid and result.key_status is None and \
self.format_fingerprint(result.fingerprint) == self.parsed_data['fingerpr']
@staticmethod
def format_fingerprint(fingerprint: str) -> str:
"""Format a PGP fingerprint into sections of 4 characters, separated by spaces."""
string_parts = []
for idx in range(0, 40, 4):
string_parts.append(fingerprint[idx:idx + 4])
if idx == 16:
string_parts.append('')
return ' '.join(string_parts)
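# Hedged illustration (not part of the original module) of the formatting
# above: 40 hex characters become ten groups of four, with a double space
# after the fifth group.
assert RPSLKeyCert.format_fingerprint('ABCD' * 10) == \
    'ABCD ABCD ABCD ABCD ABCD  ABCD ABCD ABCD ABCD ABCD'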
class RPSLMntner(RPSLObject):
fields = OrderedDict([
('mntner', RPSLGenericNameField(primary_key=True, lookup_key=True)),
('descr', RPSLTextField(multiple=True, optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('upd-to', RPSLEmailField(multiple=True)),
('mnt-nfy', RPSLEmailField(optional=True, multiple=True)),
('auth', RPSLAuthField(multiple=True)),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
def clean(self):
"""Check whether either all hash values are dummy hashes, or none."""
if not super().clean():
return False # pragma: no cover
dummy_matches = [auth[1] == PASSWORD_HASH_DUMMY_VALUE for auth in self._auth_lines(True)]
if any(dummy_matches) and not all(dummy_matches):
self.messages.error('Either all password auth hashes in a submitted mntner must be dummy objects, or none.')
def verify_auth(self, passwords: List[str], keycert_obj_pk: Optional[str]=None) -> bool:
"""
Verify whether one of a given list of passwords matches
any of the auth hashes in this object, or match the
keycert object PK.
"""
for auth in self.parsed_data.get('auth', []):
if keycert_obj_pk and auth.upper() == keycert_obj_pk.upper():
return True
if ' ' not in auth:
continue
scheme, hash = auth.split(' ', 1)
hasher = PASSWORD_HASHERS.get(scheme.upper())
if hasher:
for password in passwords:
try:
if hasher.verify(password, hash):
return True
except ValueError:
pass
return False
def has_dummy_auth_value(self) -> bool:
"""
Check whether this object has dummy auth hashes.
If clean() has returned successfully before, the answer from this method
means that either all or no hashes have dummy values.
"""
auth_values = [auth[1] for auth in self._auth_lines(password_hashes=True)]
return bool(auth_values) and all([value == PASSWORD_HASH_DUMMY_VALUE for value in auth_values])
def force_single_new_password(self, password) -> None:
"""
Overwrite all auth hashes with a single new hash for the provided password.
Retains other methods, i.e. PGPKEY.
"""
hash = 'MD5-PW ' + PASSWORD_HASHERS['MD5-PW'].hash(password)
auths = self._auth_lines(password_hashes=False)
auths.append(hash)
self._update_attribute_value('auth', auths)
def _auth_lines(self, password_hashes=True) -> List[Union[str, List[str]]]:
"""
Return a list of auth values in this object.
If password_hashes=False, returns only non-hash (i.e. PGPKEY) lines.
If password_hashes=True, returns a list of lists, each inner list containing
the hash method and the hash.
"""
lines = self.parsed_data.get('auth', [])
if password_hashes is True:
return [auth.split(' ', 1) for auth in lines if ' ' in auth]
return [auth for auth in lines if ' ' not in auth]
class RPSLPeeringSet(RPSLObject):
fields = OrderedDict([
('peering-set', RPSLSetNameField(primary_key=True, lookup_key=True, prefix='PRNG')),
('descr', RPSLTextField(multiple=True, optional=True)),
('peering', RPSLTextField(optional=True, multiple=True)),
('mp-peering', RPSLTextField(optional=True, multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLPerson(RPSLObject):
fields = OrderedDict([
('person', RPSLTextField(lookup_key=True)),
('address', RPSLTextField(multiple=True)),
('phone', RPSLTextField(multiple=True)),
('fax-no', RPSLTextField(optional=True, multiple=True)),
('e-mail', RPSLEmailField(multiple=True)),
('nic-hdl', RPSLGenericNameField(primary_key=True, lookup_key=True, non_strict_allow_any=True)),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLRole(RPSLObject):
fields = OrderedDict([
('role', RPSLTextField(lookup_key=True)),
('trouble', RPSLTextField(optional=True, multiple=True)),
('address', RPSLTextField(multiple=True)),
('phone', RPSLTextField(multiple=True)),
('fax-no', RPSLTextField(optional=True, multiple=True)),
('e-mail', RPSLEmailField(multiple=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('nic-hdl', RPSLGenericNameField(primary_key=True, lookup_key=True, non_strict_allow_any=True)),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLRoute(RPSLObject):
fields = OrderedDict([
('route', RPSLIPv4PrefixField(primary_key=True, lookup_key=True)),
('descr', RPSLTextField(multiple=True, optional=True)),
('origin', RPSLASNumberField(primary_key=True)),
('holes', RPSLIPv4PrefixesField(optional=True, multiple=True)),
('member-of', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['route-set'], strong=False)),
('inject', RPSLTextField(optional=True, multiple=True)),
('aggr-bndry', RPSLTextField(optional=True)),
('aggr-mtd', RPSLTextField(optional=True)),
('export-comps', RPSLTextField(optional=True)),
('components', RPSLTextField(optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('geoidx', RPSLTextField(optional=True, multiple=True)),
('roa-uri', RPSLTextField(optional=True)),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLRouteSet(RPSLObject):
fields = OrderedDict([
('route-set', RPSLSetNameField(primary_key=True, lookup_key=True, prefix='RS')),
('members', RPSLRouteSetMembersField(ip_version=4, lookup_key=True, optional=True, multiple=True)),
('mp-members', RPSLRouteSetMembersField(ip_version=None, lookup_key=True, optional=True, multiple=True)),
('mbrs-by-ref', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['mntner'], allow_kw_any=True, strong=False)),
('descr', RPSLTextField(multiple=True, optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLRoute6(RPSLObject):
fields = OrderedDict([
('route6', RPSLIPv6PrefixField(primary_key=True, lookup_key=True)),
('descr', RPSLTextField(multiple=True, optional=True)),
('origin', RPSLASNumberField(primary_key=True)),
('holes', RPSLIPv6PrefixesField(optional=True, multiple=True)),
('member-of', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['route-set'], strong=False)),
('inject', RPSLTextField(optional=True, multiple=True)),
('aggr-bndry', RPSLTextField(optional=True)),
('aggr-mtd', RPSLTextField(optional=True)),
('export-comps', RPSLTextField(optional=True)),
('components', RPSLTextField(optional=True)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('geoidx', RPSLTextField(optional=True, multiple=True)),
('roa-uri', RPSLTextField(optional=True)),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
class RPSLRtrSet(RPSLObject):
fields = OrderedDict([
('rtr-set', RPSLSetNameField(primary_key=True, lookup_key=True, prefix='RTRS')),
('descr', RPSLTextField(multiple=True, optional=True)),
('members', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['inet-rtr', 'rtr-set'], strong=False)),
('mp-members', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['inet-rtr', 'rtr-set'], strong=False)),
('mbrs-by-ref', RPSLReferenceListField(lookup_key=True, optional=True, multiple=True, referring=['mntner'], allow_kw_any=True, strong=False)),
('admin-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('tech-c', RPSLReferenceField(lookup_key=True, optional=True, multiple=True, referring=['role', 'person'])),
('remarks', RPSLTextField(optional=True, multiple=True)),
('notify', RPSLEmailField(optional=True, multiple=True)),
('mnt-by', RPSLReferenceListField(lookup_key=True, multiple=True, referring=['mntner'])),
('changed', RPSLChangedField(multiple=True)),
('source', RPSLGenericNameField()),
])
OBJECT_CLASS_MAPPING = {
'as-block': RPSLAsBlock,
'as-set': RPSLAsSet,
'aut-num': RPSLAutNum,
'domain': RPSLDomain,
'filter-set': RPSLFilterSet,
'inet-rtr': RPSLInetRtr,
'inet6num': RPSLInet6Num,
'inetnum': RPSLInetnum,
'key-cert': RPSLKeyCert,
'mntner': RPSLMntner,
'peering-set': RPSLPeeringSet,
'person': RPSLPerson,
'role': RPSLRole,
'route': RPSLRoute,
'route-set': RPSLRouteSet,
'route6': RPSLRoute6,
'rtr-set': RPSLRtrSet,
}
def lookup_field_names() -> Set[str]:
"""Return all unique names of all lookup keys in all objects, plus 'origin'."""
names = {'origin'}
for object_class in OBJECT_CLASS_MAPPING.values():
names.update([f for f in object_class.lookup_fields if f not in object_class.pk_fields])
return names
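# Usage sketch (hedged): OBJECT_CLASS_MAPPING maps RPSL class names to the classes
# defined above, and lookup_field_names() returns every non-primary lookup key plus
# 'origin'. Assuming lookup_fields/pk_fields are derived from the lookup_key/
# primary_key flags in the field definitions:
#
#   klass = OBJECT_CLASS_MAPPING['route6']   # -> RPSLRoute6
#   'mnt-by' in lookup_field_names()         # True  (declared lookup_key=True above)
#   'route6' in lookup_field_names()         # False (primary key, excluded above)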
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import dirname, isdir
import click
from platformio import app, exception, util
from platformio.commands.boards import print_boards
from platformio.compat import dump_json_to_unicode
from platformio.managers.platform import PlatformFactory, PlatformManager
@click.group(short_help="Platform Manager")
def cli():
pass
def _print_platforms(platforms):
for platform in platforms:
click.echo(
"{name} ~ {title}".format(
name=click.style(platform["name"], fg="cyan"), title=platform["title"]
)
)
click.echo("=" * (3 + len(platform["name"] + platform["title"])))
click.echo(platform["description"])
click.echo()
if "homepage" in platform:
click.echo("Home: %s" % platform["homepage"])
if "frameworks" in platform and platform["frameworks"]:
click.echo("Frameworks: %s" % ", ".join(platform["frameworks"]))
if "packages" in platform:
click.echo("Packages: %s" % ", ".join(platform["packages"]))
if "version" in platform:
if "__src_url" in platform:
click.echo(
"Version: #%s (%s)" % (platform["version"], platform["__src_url"])
)
else:
click.echo("Version: " + platform["version"])
click.echo()
def _get_registry_platforms():
platforms = util.get_api_result("/platforms", cache_valid="7d")
pm = PlatformManager()
for platform in platforms or []:
platform["versions"] = pm.get_all_repo_versions(platform["name"])
return platforms
def _get_platform_data(*args, **kwargs):
try:
return _get_installed_platform_data(*args, **kwargs)
except exception.UnknownPlatform:
return _get_registry_platform_data(*args, **kwargs)
def _get_installed_platform_data(platform, with_boards=True, expose_packages=True):
p = PlatformFactory.newPlatform(platform)
data = dict(
name=p.name,
title=p.title,
description=p.description,
version=p.version,
homepage=p.homepage,
url=p.homepage,
repository=p.repository_url,
license=p.license,
forDesktop=not p.is_embedded(),
frameworks=sorted(list(p.frameworks) if p.frameworks else []),
packages=list(p.packages) if p.packages else [],
)
# if dump to API
# del data['version']
# return data
# overwrite VCS version and add extra fields
manifest = PlatformManager().load_manifest(dirname(p.manifest_path))
assert manifest
for key in manifest:
if key == "version" or key.startswith("__"):
data[key] = manifest[key]
if with_boards:
data["boards"] = [c.get_brief_data() for c in p.get_boards().values()]
if not data["packages"] or not expose_packages:
return data
data["packages"] = []
installed_pkgs = p.get_installed_packages()
for name, opts in p.packages.items():
item = dict(
name=name,
type=p.get_package_type(name),
requirements=opts.get("version"),
optional=opts.get("optional") is True,
)
if name in installed_pkgs:
for key, value in installed_pkgs[name].items():
if key not in ("url", "version", "description"):
continue
item[key] = value
if key == "version":
item["originalVersion"] = util.get_original_version(value)
data["packages"].append(item)
return data
def _get_registry_platform_data( # pylint: disable=unused-argument
platform, with_boards=True, expose_packages=True
):
_data = None
for p in _get_registry_platforms():
if p["name"] == platform:
_data = p
break
if not _data:
return None
data = dict(
name=_data["name"],
title=_data["title"],
description=_data["description"],
homepage=_data["homepage"],
repository=_data["repository"],
url=_data["url"],
license=_data["license"],
forDesktop=_data["forDesktop"],
frameworks=_data["frameworks"],
packages=_data["packages"],
versions=_data["versions"],
)
if with_boards:
data["boards"] = [
board
for board in PlatformManager().get_registered_boards()
if board["platform"] == _data["name"]
]
return data
@cli.command("search", short_help="Search for development platform")
@click.argument("query", required=False)
@click.option("--json-output", is_flag=True)
def platform_search(query, json_output):
platforms = []
for platform in _get_registry_platforms():
if query == "all":
query = ""
search_data = dump_json_to_unicode(platform)
if query and query.lower() not in search_data.lower():
continue
platforms.append(
_get_registry_platform_data(
platform["name"], with_boards=False, expose_packages=False
)
)
if json_output:
click.echo(dump_json_to_unicode(platforms))
else:
_print_platforms(platforms)
@cli.command("frameworks", short_help="List supported frameworks, SDKs")
@click.argument("query", required=False)
@click.option("--json-output", is_flag=True)
def platform_frameworks(query, json_output):
frameworks = []
for framework in util.get_api_result("/frameworks", cache_valid="7d"):
if query == "all":
query = ""
search_data = dump_json_to_unicode(framework)
if query and query.lower() not in search_data.lower():
continue
framework["homepage"] = "https://platformio.org/frameworks/" + framework["name"]
framework["platforms"] = [
platform["name"]
for platform in _get_registry_platforms()
if framework["name"] in platform["frameworks"]
]
frameworks.append(framework)
frameworks = sorted(frameworks, key=lambda manifest: manifest["name"])
if json_output:
click.echo(dump_json_to_unicode(frameworks))
else:
_print_platforms(frameworks)
@cli.command("list", short_help="List installed development platforms")
@click.option("--json-output", is_flag=True)
def platform_list(json_output):
platforms = []
pm = PlatformManager()
for manifest in pm.get_installed():
platforms.append(
_get_installed_platform_data(
manifest["__pkg_dir"], with_boards=False, expose_packages=False
)
)
platforms = sorted(platforms, key=lambda manifest: manifest["name"])
if json_output:
click.echo(dump_json_to_unicode(platforms))
else:
_print_platforms(platforms)
@cli.command("show", short_help="Show details about development platform")
@click.argument("platform")
@click.option("--json-output", is_flag=True)
def platform_show(platform, json_output): # pylint: disable=too-many-branches
data = _get_platform_data(platform)
if not data:
raise exception.UnknownPlatform(platform)
if json_output:
return click.echo(dump_json_to_unicode(data))
click.echo(
"{name} ~ {title}".format(
name=click.style(data["name"], fg="cyan"), title=data["title"]
)
)
click.echo("=" * (3 + len(data["name"] + data["title"])))
click.echo(data["description"])
click.echo()
if "version" in data:
click.echo("Version: %s" % data["version"])
if data["homepage"]:
click.echo("Home: %s" % data["homepage"])
if data["repository"]:
click.echo("Repository: %s" % data["repository"])
if data["url"]:
click.echo("Vendor: %s" % data["url"])
if data["license"]:
click.echo("License: %s" % data["license"])
if data["frameworks"]:
click.echo("Frameworks: %s" % ", ".join(data["frameworks"]))
if not data["packages"]:
return None
if not isinstance(data["packages"][0], dict):
click.echo("Packages: %s" % ", ".join(data["packages"]))
else:
click.echo()
click.secho("Packages", bold=True)
click.echo("--------")
for item in data["packages"]:
click.echo()
click.echo("Package %s" % click.style(item["name"], fg="yellow"))
click.echo("-" * (8 + len(item["name"])))
if item["type"]:
click.echo("Type: %s" % item["type"])
click.echo("Requirements: %s" % item["requirements"])
click.echo(
"Installed: %s" % ("Yes" if item.get("version") else "No (optional)")
)
if "version" in item:
click.echo("Version: %s" % item["version"])
if "originalVersion" in item:
click.echo("Original version: %s" % item["originalVersion"])
if "description" in item:
click.echo("Description: %s" % item["description"])
if data["boards"]:
click.echo()
click.secho("Boards", bold=True)
click.echo("------")
print_boards(data["boards"])
return True
@cli.command("install", short_help="Install new development platform")
@click.argument("platforms", nargs=-1, required=True, metavar="[PLATFORM...]")
@click.option("--with-package", multiple=True)
@click.option("--without-package", multiple=True)
@click.option("--skip-default-package", is_flag=True)
@click.option("--with-all-packages", is_flag=True)
@click.option(
"-f",
"--force",
is_flag=True,
help="Reinstall/redownload dev/platform and its packages if exist",
)
def platform_install( # pylint: disable=too-many-arguments
platforms,
with_package,
without_package,
skip_default_package,
with_all_packages,
force,
):
pm = PlatformManager()
for platform in platforms:
if pm.install(
name=platform,
with_packages=with_package,
without_packages=without_package,
skip_default_package=skip_default_package,
with_all_packages=with_all_packages,
force=force,
):
click.secho(
"The platform '%s' has been successfully installed!\n"
"The rest of packages will be installed automatically "
"depending on your build environment." % platform,
fg="green",
)
@cli.command("uninstall", short_help="Uninstall development platform")
@click.argument("platforms", nargs=-1, required=True, metavar="[PLATFORM...]")
def platform_uninstall(platforms):
pm = PlatformManager()
for platform in platforms:
if pm.uninstall(platform):
click.secho(
"The platform '%s' has been successfully uninstalled!" % platform,
fg="green",
)
@cli.command("update", short_help="Update installed development platforms")
@click.argument("platforms", nargs=-1, required=False, metavar="[PLATFORM...]")
@click.option(
"-p", "--only-packages", is_flag=True, help="Update only the platform packages"
)
@click.option(
"-c",
"--only-check",
is_flag=True,
help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
"--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.option("--json-output", is_flag=True)
def platform_update( # pylint: disable=too-many-locals
platforms, only_packages, only_check, dry_run, json_output
):
pm = PlatformManager()
pkg_dir_to_name = {}
if not platforms:
platforms = []
for manifest in pm.get_installed():
platforms.append(manifest["__pkg_dir"])
pkg_dir_to_name[manifest["__pkg_dir"]] = manifest.get(
"title", manifest["name"]
)
only_check = dry_run or only_check
if only_check and json_output:
result = []
for platform in platforms:
pkg_dir = platform if isdir(platform) else None
requirements = None
url = None
if not pkg_dir:
name, requirements, url = pm.parse_pkg_uri(platform)
pkg_dir = pm.get_package_dir(name, requirements, url)
if not pkg_dir:
continue
latest = pm.outdated(pkg_dir, requirements)
if (
not latest
and not PlatformFactory.newPlatform(pkg_dir).are_outdated_packages()
):
continue
data = _get_installed_platform_data(
pkg_dir, with_boards=False, expose_packages=False
)
if latest:
data["versionLatest"] = latest
result.append(data)
return click.echo(dump_json_to_unicode(result))
# cleanup cached board and platform lists
app.clean_cache()
for platform in platforms:
click.echo(
"Platform %s"
% click.style(pkg_dir_to_name.get(platform, platform), fg="cyan")
)
click.echo("--------")
pm.update(platform, only_packages=only_packages, only_check=only_check)
click.echo()
return True
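# CLI usage sketch (assumes these click commands are mounted under the usual
# `platformio platform` / `pio platform` group; adjust to your actual entry point):
#
#   pio platform search espressif --json-output
#   pio platform install atmelavr --with-package tool-avrdude
#   pio platform update --dry-run --json-output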
|
#!/usr/bin/env python
#
# Read in a text file, run CoreNLP to get features (pos, lemma, NER),
# and convert it to JSRE's data ("examples") format.
# Interactively solicit labels from the user.
# Note: see below for how to select whether 'elements' or 'minerals'
# will be used to generate the candidate relation pairs
# (enables separate evaluation for these classes).
#
# Author: Kiri Wagstaff
# March 13, 2017
# Copyright notice at bottom of file.
import sys, os, io
import json
import urllib
from pycorenlp import StanfordCoreNLP
def pretty_print(json_obj):
print json.dumps(json_obj,
sort_keys=True, indent=2, separators=(',', ': '))
indirname = '../text/lpsc15-C-raymond-sol1159'
#indirname = '../text/lpsc15-C-raymond-jsre'
outdirname = '../text/lpsc15-C-kiri-jsre'
#indirname = '../text/lpsc16-C-raymond'
#outdirname = '../text/lpsc16-C-kiri-jsre'
dirlist = [fn for fn in os.listdir(indirname) if
fn.endswith('.txt')]
dirlist.sort()
corenlp = StanfordCoreNLP('http://localhost:9000')
# Specify CoreNLP properties
props = { 'annotators': 'tokenize,ssplit,lemma,pos,ner',
'ner.model': 'ner_model_train_62r15_685k14_384k15.ser.gz',
'outputFormat': 'json'}
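# Output sketch: each JSRE example written below is one tab-separated line,
#   <label>\t<id>\t<index&&word&&lemma&&pos&&ner&&role> <index&&word&&...> ...
# where role is 'A' for the Target (agent), 'T' for the element/mineral entity in
# the candidate relation, and 'O' for every other token (see the loop that
# assembles `body`).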
print 'Processing %d documents. ' % len(dirlist)
for fn in dirlist:
fnamebase = fn[:fn.find('.txt')]
infname = '%s.txt' % fnamebase
outfname = '%s-mineral.examples' % fnamebase
print 'Reading in %s' % infname
inf = open(os.path.join(indirname, infname))
text = inf.read()
inf.close()
# Quote (with percent-encoding) reserved characters in URL for CoreNLP
text = urllib.quote(text)
doc = corenlp.annotate(text, properties=props)
with io.open(os.path.join(outdirname, outfname), 'w', encoding='utf8') as outf:
# Goal: Map Raymond's .ann (brat) annotations into the CoreNLP-parsed
# sentence/token structure.
ex_id = 0
for s in doc['sentences']:
# For each pair of target+(element|mineral) entities,
# are they in a contains relationship?
# label:
# 0 - negative
# 1 - entity_1 contains entity_2
# 2 - entity_2 contains entity_1
# Get the relevant entities (Target, Element, Mineral)
targets = [t for t in s['tokens'] if t['ner'] == 'Target']
elements = [t for t in s['tokens'] if t['ner'] == 'Element']
minerals = [t for t in s['tokens'] if t['ner'] == 'Mineral']
# Select whether to use elements or minerals
# active = elements
active = minerals
'''
print ' '.join([w['word'] for w in s['tokens']])
print minerals
raw_input()
'''
for i in range(len(targets)):
for j in range(len(active)):
# Ideally, use "contains" brat annotations to set label
# For now, go interactive
# Print the sentence with relevant entities highlighted
words = [w['word'] for w in s['tokens']]
sent = ''
for w,word in enumerate(words):
# CoreNLP indexes starting at 1
if (w == targets[i]['index']-1 or
w == active[j]['index']-1):
sent += '_%s_ ' % word
else:
sent += '%s ' % word
print sent
                    # Solicit a label from the user
label = -1
while label not in [0,1]:
print 'Select one:'
print '0: no relationship'
print '1: %s contains %s' % (targets[i]['word'],
active[j]['word'])
label = int(raw_input())
# Change label from 1 to 2 if needed
if label != 0:
if targets[i]['index'] < active[j]['index']:
label = 1
else:
label = 2
# Create a unique identifier
id = '%s_%s_%s' % (fnamebase,
str(s['index']),
str(ex_id))
ex_id += 1
body = ''
for t in s['tokens']:
# Target entity is the agent;
                        # Element/Mineral entity is the target (of the relation)
if t['index'] == targets[i]['index']:
entity_label = 'A'
elif t['index'] == active[j]['index']:
entity_label = 'T'
else:
entity_label = 'O'
# CoreNLP indexes starting at 1
body += '%d&&%s&&%s&&%s&&%s&&%s ' % (t['index']-1,
t['word'],
t['lemma'],
t['pos'],
t['ner'],
entity_label)
# Output the example
print '%s\t%s\t%s\n' % (label, id, body)
outf.write('%s\t%s\t%s\n' % (label, id, body))
print
# Copyright 2017, by the California Institute of Technology. ALL
# RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all applicable U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Copyright (c) 2021 The Bagi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test the zapwallettxes functionality.
- start two bagid nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import BagiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class ZapWalletTXesTest (BagiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, err_msg="Wait for getMempoolInfo", timeout=3)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes, but not persistmempool.
# The unconfirmed transaction is zapped and is no longer in the wallet.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
|
import tensorflow as tf
import numpy as np
from collections import Counter
from tqdm import tqdm
import tableprint as tp
import re
from .args import *
from .util import *
from .text_util import *
from .input import *
def eager_to_str(v):
return bytes_to_string(np.array(v))
def extend_args(parser):
parser.add_argument('--print-records', action='store_true')
if __name__ == "__main__":
args = get_args(extend_args)
vocab = Vocab.load_from_args(args)
count = 0
tf.enable_eager_execution()
dist = Counter()
types = set()
labels = set()
for i in tqdm(tf.python_io.tf_record_iterator(args["train_input_path"]), total=args["limit"]):
if args["limit"] is not None and count > args["limit"]:
break
# Parse
r = parse_single_example(i)
r,label = reshape_example(args, r)
r["type_string"] = eager_to_str(r["type_string"])
r["src"] = vocab.ids_to_english(np.array(r["src"]))
r["label"] = vocab.inverse_lookup(int(r["label"]))
r["kb_nodes"] = [vocab.ids_to_english(np.array(i)) for i in r["kb_nodes"] if np.array(i).size > 0]
count += 1
# Skip non matching prefixes
if args["filter_type_prefix"] is not None:
if not r["type_string"].startswith(args["filter_type_prefix"]):
continue
types.add(r["type_string"])
labels.add(r["label"])
dist[(r["label"], r["type_string"])] += 1
if args["print_records"]:
print(r["src"] + " = " + r["label"])
for j in r["kb_nodes"]:
print("NODE: " + j)
print()
print(f"\nTotal records processed: {count}")
def shorten(i):
return re.sub('[^A-Z]', '', i)
# return i.replace("Station", "S").replace("Property", "P").replace("Adjacent", "A")
headers = ["Label"] + [shorten(i) for i in list(types)] + ["Total"]
data = [ [label] + [dist[(label, tpe)] for tpe in types] + [sum([dist[(label, tpe)] for tpe in types])] for label in labels]
data.append(["Total"] + [sum([dist[(label, tpe)] for label in labels]) for tpe in types] + [sum(dist.values())])
width = [20] + [7 for i in types] + [7]
tp.table(data, headers, width=width)
|
from pathlib import Path
import numpy as np
class Tree:
def __init__(self, root) -> None:
        '''Note: root must not contain any subdirectories'''
self.root = Path(root)
self.special_suffix = ['.DNG', '.CR2']
@property
def full_names(self):
        '''All file names'''
return {name.as_posix()
for name in self.root.iterdir()}
@property
def suffix(self):
        '''Suffixes of all usable files'''
        return {Path(n).suffix for n in self.names}
@property
def special_names(self):
        '''Names of files with special (excluded) suffixes'''
return {name.as_posix()
for name in self.root.iterdir()
if name.suffix in self.special_suffix}
@property
def names(self):
        '''All usable image names'''
return self.full_names - self.special_names
def __len__(self):
return len(self.names)
def take_shuffle(self, num):
        '''Randomly select num images'''
_names = list(self.names)
m = len(_names)
index = np.arange(m)
np.random.shuffle(index)
return np.take(_names, index[:num]).tolist()
class PathSet:
def __init__(self, root) -> None:
self.root = root
@property
def parent_paths(self):
        # Get the paths of all the data directories
_paths = [path.as_posix()
for path in Path(self.root).iterdir()]
return _paths
@property
def bunch(self):
_bunch = {Path(name).name: Tree(name).names
for name in self.parent_paths}
return _bunch
@property
def names(self):
_names = set()
for val in self.bunch.values():
_names = _names.union(val)
return _names
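# Usage sketch (hypothetical layout: root/<album>/<image files>; Tree excludes the
# RAW suffixes listed in special_suffix):
#
#   tree = Tree('photos/2021-06')
#   sample = tree.take_shuffle(5)   # 5 random usable image paths
#   ps = PathSet('photos')
#   ps.bunch                        # {'2021-06': {...}, '2021-07': {...}, ...}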
|
const crypto = require('crypto')
const path = require('path')
const util = require('util')
const {ipcRenderer} = require('electron')
const _ = require('underscore-plus')
const {deprecate} = require('grim')
const {CompositeDisposable, Disposable, Emitter} = require('event-kit')
const fs = require('fs-plus')
const {mapSourcePosition} = require('@atom/source-map-support')
const WindowEventHandler = require('./window-event-handler')
const StateStore = require('./state-store')
const registerDefaultCommands = require('./register-default-commands')
const {updateProcessEnv} = require('./update-process-env')
const ConfigSchema = require('./config-schema')
const DeserializerManager = require('./deserializer-manager')
const ViewRegistry = require('./view-registry')
const NotificationManager = require('./notification-manager')
const Config = require('./config')
const KeymapManager = require('./keymap-extensions')
const TooltipManager = require('./tooltip-manager')
const CommandRegistry = require('./command-registry')
const URIHandlerRegistry = require('./uri-handler-registry')
const GrammarRegistry = require('./grammar-registry')
const {HistoryManager} = require('./history-manager')
const ReopenProjectMenuManager = require('./reopen-project-menu-manager')
const StyleManager = require('./style-manager')
const PackageManager = require('./package-manager')
const ThemeManager = require('./theme-manager')
const MenuManager = require('./menu-manager')
const ContextMenuManager = require('./context-menu-manager')
const CommandInstaller = require('./command-installer')
const CoreURIHandlers = require('./core-uri-handlers')
const ProtocolHandlerInstaller = require('./protocol-handler-installer')
const Project = require('./project')
const TitleBar = require('./title-bar')
const Workspace = require('./workspace')
const PaneContainer = require('./pane-container')
const PaneAxis = require('./pane-axis')
const Pane = require('./pane')
const Dock = require('./dock')
const TextEditor = require('./text-editor')
const TextBuffer = require('text-buffer')
const TextEditorRegistry = require('./text-editor-registry')
const AutoUpdateManager = require('./auto-update-manager')
const stat = util.promisify(fs.stat)
let nextId = 0
// Essential: Atom global for dealing with packages, themes, menus, and the window.
//
// An instance of this class is always available as the `atom` global.
class AtomEnvironment {
/*
Section: Properties
*/
constructor (params = {}) {
this.id = (params.id != null) ? params.id : nextId++
// Public: A {Clipboard} instance
this.clipboard = params.clipboard
this.updateProcessEnv = params.updateProcessEnv || updateProcessEnv
this.enablePersistence = params.enablePersistence
this.applicationDelegate = params.applicationDelegate
this.nextProxyRequestId = 0
this.unloading = false
this.loadTime = null
this.emitter = new Emitter()
this.disposables = new CompositeDisposable()
this.pathsWithWaitSessions = new Set()
// Public: A {DeserializerManager} instance
this.deserializers = new DeserializerManager(this)
this.deserializeTimings = {}
// Public: A {ViewRegistry} instance
this.views = new ViewRegistry(this)
// Public: A {NotificationManager} instance
this.notifications = new NotificationManager()
this.stateStore = new StateStore('AtomEnvironments', 1)
// Public: A {Config} instance
this.config = new Config({
saveCallback: settings => {
if (this.enablePersistence) {
this.applicationDelegate.setUserSettings(settings, this.config.getUserConfigPath())
}
}
})
this.config.setSchema(null, {type: 'object', properties: _.clone(ConfigSchema)})
// Public: A {KeymapManager} instance
this.keymaps = new KeymapManager({notificationManager: this.notifications})
// Public: A {TooltipManager} instance
this.tooltips = new TooltipManager({keymapManager: this.keymaps, viewRegistry: this.views})
// Public: A {CommandRegistry} instance
this.commands = new CommandRegistry()
this.uriHandlerRegistry = new URIHandlerRegistry()
// Public: A {GrammarRegistry} instance
this.grammars = new GrammarRegistry({config: this.config})
// Public: A {StyleManager} instance
this.styles = new StyleManager()
// Public: A {PackageManager} instance
this.packages = new PackageManager({
config: this.config,
styleManager: this.styles,
commandRegistry: this.commands,
keymapManager: this.keymaps,
notificationManager: this.notifications,
grammarRegistry: this.grammars,
deserializerManager: this.deserializers,
viewRegistry: this.views,
uriHandlerRegistry: this.uriHandlerRegistry
})
// Public: A {ThemeManager} instance
this.themes = new ThemeManager({
packageManager: this.packages,
config: this.config,
styleManager: this.styles,
notificationManager: this.notifications,
viewRegistry: this.views
})
// Public: A {MenuManager} instance
this.menu = new MenuManager({keymapManager: this.keymaps, packageManager: this.packages})
// Public: A {ContextMenuManager} instance
this.contextMenu = new ContextMenuManager({keymapManager: this.keymaps})
this.packages.setMenuManager(this.menu)
this.packages.setContextMenuManager(this.contextMenu)
this.packages.setThemeManager(this.themes)
// Public: A {Project} instance
this.project = new Project({
notificationManager: this.notifications,
packageManager: this.packages,
grammarRegistry: this.grammars,
config: this.config,
applicationDelegate: this.applicationDelegate
})
this.commandInstaller = new CommandInstaller(this.applicationDelegate)
this.protocolHandlerInstaller = new ProtocolHandlerInstaller()
// Public: A {TextEditorRegistry} instance
this.textEditors = new TextEditorRegistry({
config: this.config,
grammarRegistry: this.grammars,
assert: this.assert.bind(this),
packageManager: this.packages
})
// Public: A {Workspace} instance
this.workspace = new Workspace({
config: this.config,
project: this.project,
packageManager: this.packages,
grammarRegistry: this.grammars,
deserializerManager: this.deserializers,
notificationManager: this.notifications,
applicationDelegate: this.applicationDelegate,
viewRegistry: this.views,
assert: this.assert.bind(this),
textEditorRegistry: this.textEditors,
styleManager: this.styles,
enablePersistence: this.enablePersistence
})
this.themes.workspace = this.workspace
this.autoUpdater = new AutoUpdateManager({applicationDelegate: this.applicationDelegate})
if (this.keymaps.canLoadBundledKeymapsFromMemory()) {
this.keymaps.loadBundledKeymaps()
}
this.registerDefaultCommands()
this.registerDefaultOpeners()
this.registerDefaultDeserializers()
this.windowEventHandler = new WindowEventHandler({atomEnvironment: this, applicationDelegate: this.applicationDelegate})
// Public: A {HistoryManager} instance
this.history = new HistoryManager({project: this.project, commands: this.commands, stateStore: this.stateStore})
// Keep instances of HistoryManager in sync
this.disposables.add(this.history.onDidChangeProjects(event => {
if (!event.reloaded) this.applicationDelegate.didChangeHistoryManager()
}))
}
initialize (params = {}) {
// This will force TextEditorElement to register the custom element, so that
// using `document.createElement('atom-text-editor')` works if it's called
// before opening a buffer.
require('./text-editor-element')
this.window = params.window
this.document = params.document
this.blobStore = params.blobStore
this.configDirPath = params.configDirPath
const {devMode, safeMode, resourcePath, userSettings, projectSpecification} = this.getLoadSettings()
ConfigSchema.projectHome = {
type: 'string',
default: path.join(fs.getHomeDirectory(), 'github'),
description: 'The directory where projects are assumed to be located. Packages created using the Package Generator will be stored here by default.'
}
this.config.initialize({
mainSource: this.enablePersistence && path.join(this.configDirPath, 'config.cson'),
projectHomeSchema: ConfigSchema.projectHome
})
this.config.resetUserSettings(userSettings)
if (projectSpecification != null && projectSpecification.config != null) {
this.project.replace(projectSpecification)
}
this.menu.initialize({resourcePath})
this.contextMenu.initialize({resourcePath, devMode})
this.keymaps.configDirPath = this.configDirPath
this.keymaps.resourcePath = resourcePath
this.keymaps.devMode = devMode
if (!this.keymaps.canLoadBundledKeymapsFromMemory()) {
this.keymaps.loadBundledKeymaps()
}
this.commands.attach(this.window)
this.styles.initialize({configDirPath: this.configDirPath})
this.packages.initialize({devMode, configDirPath: this.configDirPath, resourcePath, safeMode})
this.themes.initialize({configDirPath: this.configDirPath, resourcePath, safeMode, devMode})
this.commandInstaller.initialize(this.getVersion())
this.uriHandlerRegistry.registerHostHandler('core', CoreURIHandlers.create(this))
this.autoUpdater.initialize()
this.protocolHandlerInstaller.initialize(this.config, this.notifications)
this.themes.loadBaseStylesheets()
this.initialStyleElements = this.styles.getSnapshot()
if (params.onlyLoadBaseStyleSheets) this.themes.initialLoadComplete = true
this.setBodyPlatformClass()
this.stylesElement = this.styles.buildStylesElement()
this.document.head.appendChild(this.stylesElement)
this.keymaps.subscribeToFileReadFailure()
this.installUncaughtErrorHandler()
this.attachSaveStateListeners()
this.windowEventHandler.initialize(this.window, this.document)
const didChangeStyles = this.didChangeStyles.bind(this)
this.disposables.add(this.styles.onDidAddStyleElement(didChangeStyles))
this.disposables.add(this.styles.onDidUpdateStyleElement(didChangeStyles))
this.disposables.add(this.styles.onDidRemoveStyleElement(didChangeStyles))
this.observeAutoHideMenuBar()
this.disposables.add(this.applicationDelegate.onDidChangeHistoryManager(() => this.history.loadState()))
}
preloadPackages () {
return this.packages.preloadPackages()
}
attachSaveStateListeners () {
const saveState = _.debounce(() => {
this.window.requestIdleCallback(() => {
if (!this.unloading) this.saveState({isUnloading: false})
})
}, this.saveStateDebounceInterval)
this.document.addEventListener('mousedown', saveState, true)
this.document.addEventListener('keydown', saveState, true)
this.disposables.add(new Disposable(() => {
this.document.removeEventListener('mousedown', saveState, true)
this.document.removeEventListener('keydown', saveState, true)
}))
}
registerDefaultDeserializers () {
this.deserializers.add(Workspace)
this.deserializers.add(PaneContainer)
this.deserializers.add(PaneAxis)
this.deserializers.add(Pane)
this.deserializers.add(Dock)
this.deserializers.add(Project)
this.deserializers.add(TextEditor)
this.deserializers.add(TextBuffer)
}
registerDefaultCommands () {
registerDefaultCommands({
commandRegistry: this.commands,
config: this.config,
commandInstaller: this.commandInstaller,
notificationManager: this.notifications,
project: this.project,
clipboard: this.clipboard
})
}
registerDefaultOpeners () {
this.workspace.addOpener(uri => {
switch (uri) {
case 'atom://.atom/stylesheet':
return this.workspace.openTextFile(this.styles.getUserStyleSheetPath())
case 'atom://.atom/keymap':
return this.workspace.openTextFile(this.keymaps.getUserKeymapPath())
case 'atom://.atom/config':
return this.workspace.openTextFile(this.config.getUserConfigPath())
case 'atom://.atom/init-script':
return this.workspace.openTextFile(this.getUserInitScriptPath())
}
})
}
registerDefaultTargetForKeymaps () {
this.keymaps.defaultTarget = this.workspace.getElement()
}
observeAutoHideMenuBar () {
this.disposables.add(this.config.onDidChange('core.autoHideMenuBar', ({newValue}) => {
this.setAutoHideMenuBar(newValue)
}))
if (this.config.get('core.autoHideMenuBar')) this.setAutoHideMenuBar(true)
}
async reset () {
this.deserializers.clear()
this.registerDefaultDeserializers()
this.config.clear()
this.config.setSchema(null, {type: 'object', properties: _.clone(ConfigSchema)})
this.keymaps.clear()
this.keymaps.loadBundledKeymaps()
this.commands.clear()
this.registerDefaultCommands()
this.styles.restoreSnapshot(this.initialStyleElements)
this.menu.clear()
this.clipboard.reset()
this.notifications.clear()
this.contextMenu.clear()
await this.packages.reset()
this.workspace.reset(this.packages)
this.registerDefaultOpeners()
this.project.reset(this.packages)
this.workspace.subscribeToEvents()
this.grammars.clear()
this.textEditors.clear()
this.views.clear()
this.pathsWithWaitSessions.clear()
}
destroy () {
if (!this.project) return
this.disposables.dispose()
if (this.workspace) this.workspace.destroy()
this.workspace = null
this.themes.workspace = null
if (this.project) this.project.destroy()
this.project = null
this.commands.clear()
if (this.stylesElement) this.stylesElement.remove()
this.autoUpdater.destroy()
this.uriHandlerRegistry.destroy()
this.uninstallWindowEventHandler()
}
/*
Section: Event Subscription
*/
// Extended: Invoke the given callback whenever {::beep} is called.
//
// * `callback` {Function} to be called whenever {::beep} is called.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
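  //
  // Usage sketch (assumes the global `atom` instance described above):
  //
  //   const subscription = atom.onDidBeep(() => console.log('beep'))
  //   // later: subscription.dispose()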
onDidBeep (callback) {
return this.emitter.on('did-beep', callback)
}
// Extended: Invoke the given callback when there is an unhandled error, but
// before the devtools pop open
//
// * `callback` {Function} to be called whenever there is an unhandled error
// * `event` {Object}
// * `originalError` {Object} the original error object
  // * `message` {String} the error's message
// * `url` {String} Url to the file where the error originated.
// * `line` {Number}
// * `column` {Number}
// * `preventDefault` {Function} call this to avoid popping up the dev tools.
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
onWillThrowError (callback) {
return this.emitter.on('will-throw-error', callback)
}
// Extended: Invoke the given callback whenever there is an unhandled error.
//
// * `callback` {Function} to be called whenever there is an unhandled error
// * `event` {Object}
// * `originalError` {Object} the original error object
  // * `message` {String} the error's message
// * `url` {String} Url to the file where the error originated.
// * `line` {Number}
// * `column` {Number}
//
// Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
onDidThrowError (callback) {
return this.emitter.on('did-throw-error', callback)
}
// TODO: Make this part of the public API. We should make onDidThrowError
// match the interface by only yielding an exception object to the handler
// and deprecating the old behavior.
onDidFailAssertion (callback) {
return this.emitter.on('did-fail-assertion', callback)
}
// Extended: Invoke the given callback as soon as the shell environment is
// loaded (or immediately if it was already loaded).
//
  // * `callback` {Function} to be called once the shell environment is loaded
whenShellEnvironmentLoaded (callback) {
if (this.shellEnvironmentLoaded) {
callback()
return new Disposable()
} else {
return this.emitter.once('loaded-shell-environment', callback)
}
}
/*
Section: Atom Details
*/
// Public: Returns a {Boolean} that is `true` if the current window is in development mode.
inDevMode () {
if (this.devMode == null) this.devMode = this.getLoadSettings().devMode
return this.devMode
}
// Public: Returns a {Boolean} that is `true` if the current window is in safe mode.
inSafeMode () {
if (this.safeMode == null) this.safeMode = this.getLoadSettings().safeMode
return this.safeMode
}
// Public: Returns a {Boolean} that is `true` if the current window is running specs.
inSpecMode () {
if (this.specMode == null) this.specMode = this.getLoadSettings().isSpec
return this.specMode
}
  // Returns a {Boolean} indicating whether this is the first time the window has
  // been loaded.
isFirstLoad () {
if (this.firstLoad == null) this.firstLoad = this.getLoadSettings().firstLoad
return this.firstLoad
}
// Public: Get the version of the Atom application.
//
// Returns the version text {String}.
getVersion () {
if (this.appVersion == null) this.appVersion = this.getLoadSettings().appVersion
return this.appVersion
}
// Public: Gets the release channel of the Atom application.
//
// Returns the release channel as a {String}. Will return a specific release channel
// name like 'beta' or 'nightly' if one is found in the Atom version or 'stable'
// otherwise.
getReleaseChannel () {
// This matches stable, dev (with or without commit hash) and any other
// release channel following the pattern '1.00.0-channel0'
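    // Examples of how version strings map to channels under this pattern (sketch;
    // the exact strings depend on the build):
    //   '1.40.0'           -> 'stable'
    //   '1.41.0-beta3'     -> 'beta'
    //   '1.42.0-dev-5f9a1' -> 'dev'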
const match = this.getVersion().match(/\d+\.\d+\.\d+(-([a-z]+)(\d+|-\w{4,})?)?$/)
if (!match) {
return 'unrecognized'
} else if (match[2]) {
return match[2]
}
return 'stable'
}
// Public: Returns a {Boolean} that is `true` if the current version is an official release.
isReleasedVersion () {
return this.getReleaseChannel().match(/stable|beta|nightly/) != null
}
// Public: Get the time taken to completely load the current window.
//
  // This time includes things like loading and activating packages, creating
// DOM elements for the editor, and reading the config.
//
// Returns the {Number} of milliseconds taken to load the window or null
// if the window hasn't finished loading yet.
getWindowLoadTime () {
return this.loadTime
}
// Public: Get the load settings for the current window.
//
// Returns an {Object} containing all the load setting key/value pairs.
getLoadSettings () {
return this.applicationDelegate.getWindowLoadSettings()
}
/*
Section: Managing The Atom Window
*/
// Essential: Open a new Atom window using the given options.
//
// Calling this method without an options parameter will open a prompt to pick
// a file/folder to open in the new window.
//
// * `params` An {Object} with the following keys:
// * `pathsToOpen` An {Array} of {String} paths to open.
// * `newWindow` A {Boolean}, true to always open a new window instead of
// reusing existing windows depending on the paths to open.
// * `devMode` A {Boolean}, true to open the window in development mode.
// Development mode loads the Atom source from the locally cloned
// repository and also loads all the packages in ~/.atom/dev/packages
// * `safeMode` A {Boolean}, true to open the window in safe mode. Safe
// mode prevents all packages installed to ~/.atom/packages from loading.
open (params) {
return this.applicationDelegate.open(params)
}
// Extended: Prompt the user to select one or more folders.
//
// * `callback` A {Function} to call once the user has confirmed the selection.
// * `paths` An {Array} of {String} paths that the user selected, or `null`
// if the user dismissed the dialog.
pickFolder (callback) {
return this.applicationDelegate.pickFolder(callback)
}
// Essential: Close the current window.
close () {
return this.applicationDelegate.closeWindow()
}
// Essential: Get the size of current window.
//
// Returns an {Object} in the format `{width: 1000, height: 700}`
getSize () {
return this.applicationDelegate.getWindowSize()
}
// Essential: Set the size of current window.
//
// * `width` The {Number} of pixels.
// * `height` The {Number} of pixels.
setSize (width, height) {
return this.applicationDelegate.setWindowSize(width, height)
}
// Essential: Get the position of current window.
//
// Returns an {Object} in the format `{x: 10, y: 20}`
getPosition () {
return this.applicationDelegate.getWindowPosition()
}
// Essential: Set the position of current window.
//
// * `x` The {Number} of pixels.
// * `y` The {Number} of pixels.
setPosition (x, y) {
return this.applicationDelegate.setWindowPosition(x, y)
}
// Extended: Get the current window
getCurrentWindow () {
return this.applicationDelegate.getCurrentWindow()
}
// Extended: Move current window to the center of the screen.
center () {
return this.applicationDelegate.centerWindow()
}
// Extended: Focus the current window.
focus () {
this.applicationDelegate.focusWindow()
return this.window.focus()
}
// Extended: Show the current window.
show () {
return this.applicationDelegate.showWindow()
}
// Extended: Hide the current window.
hide () {
return this.applicationDelegate.hideWindow()
}
// Extended: Reload the current window.
reload () {
return this.applicationDelegate.reloadWindow()
}
// Extended: Relaunch the entire application.
restartApplication () {
return this.applicationDelegate.restartApplication()
}
// Extended: Returns a {Boolean} that is `true` if the current window is maximized.
isMaximized () {
return this.applicationDelegate.isWindowMaximized()
}
maximize () {
return this.applicationDelegate.maximizeWindow()
}
// Extended: Returns a {Boolean} that is `true` if the current window is in full screen mode.
isFullScreen () {
return this.applicationDelegate.isWindowFullScreen()
}
// Extended: Set the full screen state of the current window.
setFullScreen (fullScreen = false) {
return this.applicationDelegate.setWindowFullScreen(fullScreen)
}
// Extended: Toggle the full screen state of the current window.
toggleFullScreen () {
return this.setFullScreen(!this.isFullScreen())
}
// Restore the window to its previous dimensions and show it.
//
// Restores the full screen and maximized state after the window has resized to
// prevent resize glitches.
async displayWindow () {
await this.restoreWindowDimensions()
const steps = [
this.restoreWindowBackground(),
this.show(),
this.focus()
]
if (this.windowDimensions && this.windowDimensions.fullScreen) {
steps.push(this.setFullScreen(true))
}
if (this.windowDimensions && this.windowDimensions.maximized && process.platform !== 'darwin') {
steps.push(this.maximize())
}
await Promise.all(steps)
}
// Get the dimensions of this window.
//
// Returns an {Object} with the following keys:
// * `x` The window's x-position {Number}.
// * `y` The window's y-position {Number}.
// * `width` The window's width {Number}.
// * `height` The window's height {Number}.
getWindowDimensions () {
const browserWindow = this.getCurrentWindow()
const [x, y] = browserWindow.getPosition()
const [width, height] = browserWindow.getSize()
const maximized = browserWindow.isMaximized()
return {x, y, width, height, maximized}
}
// Set the dimensions of the window.
//
  // The window will be centered if either the x or y coordinate is omitted from
  // the dimensions parameter. If height or width are omitted, only the position
  // will be changed.
//
// * `dimensions` An {Object} with the following keys:
// * `x` The new x coordinate.
// * `y` The new y coordinate.
// * `width` The new width.
// * `height` The new height.
setWindowDimensions ({x, y, width, height}) {
const steps = []
if (width != null && height != null) {
steps.push(this.setSize(width, height))
}
if (x != null && y != null) {
steps.push(this.setPosition(x, y))
} else {
steps.push(this.center())
}
return Promise.all(steps)
}
// Returns true if the dimensions are useable, false if they should be ignored.
// Work around for https://github.com/atom/atom-shell/issues/473
isValidDimensions ({x, y, width, height} = {}) {
return (width > 0) && (height > 0) && ((x + width) > 0) && ((y + height) > 0)
}
storeWindowDimensions () {
this.windowDimensions = this.getWindowDimensions()
if (this.isValidDimensions(this.windowDimensions)) {
localStorage.setItem('defaultWindowDimensions', JSON.stringify(this.windowDimensions))
}
}
getDefaultWindowDimensions () {
const {windowDimensions} = this.getLoadSettings()
if (windowDimensions) return windowDimensions
let dimensions
try {
dimensions = JSON.parse(localStorage.getItem('defaultWindowDimensions'))
} catch (error) {
console.warn('Error parsing default window dimensions', error)
localStorage.removeItem('defaultWindowDimensions')
}
if (dimensions && this.isValidDimensions(dimensions)) {
return dimensions
} else {
const {width, height} = this.applicationDelegate.getPrimaryDisplayWorkAreaSize()
return {x: 0, y: 0, width: Math.min(1024, width), height}
}
}
async restoreWindowDimensions () {
if (!this.windowDimensions || !this.isValidDimensions(this.windowDimensions)) {
this.windowDimensions = this.getDefaultWindowDimensions()
}
await this.setWindowDimensions(this.windowDimensions)
return this.windowDimensions
}
restoreWindowBackground () {
const backgroundColor = window.localStorage.getItem('atom:window-background-color')
if (backgroundColor) {
this.backgroundStylesheet = document.createElement('style')
this.backgroundStylesheet.type = 'text/css'
this.backgroundStylesheet.innerText = `html, body { background: ${backgroundColor} !important; }`
document.head.appendChild(this.backgroundStylesheet)
}
}
storeWindowBackground () {
if (this.inSpecMode()) return
const backgroundColor = this.window.getComputedStyle(this.workspace.getElement())['background-color']
this.window.localStorage.setItem('atom:window-background-color', backgroundColor)
}
// Call this method when establishing a real application window.
async startEditorWindow () {
if (this.getLoadSettings().clearWindowState) {
await this.stateStore.clear()
}
this.unloading = false
const updateProcessEnvPromise = this.updateProcessEnvAndTriggerHooks()
const loadStatePromise = this.loadState().then(async state => {
this.windowDimensions = state && state.windowDimensions
if (!this.getLoadSettings().headless) {
await this.displayWindow()
}
this.commandInstaller.installAtomCommand(false, (error) => {
if (error) console.warn(error.message)
})
this.commandInstaller.installApmCommand(false, (error) => {
if (error) console.warn(error.message)
})
this.disposables.add(this.applicationDelegate.onDidChangeUserSettings(settings =>
this.config.resetUserSettings(settings)
))
this.disposables.add(this.applicationDelegate.onDidFailToReadUserSettings(message =>
this.notifications.addError(message)
))
this.disposables.add(this.applicationDelegate.onDidOpenLocations(this.openLocations.bind(this)))
this.disposables.add(this.applicationDelegate.onApplicationMenuCommand(this.dispatchApplicationMenuCommand.bind(this)))
this.disposables.add(this.applicationDelegate.onContextMenuCommand(this.dispatchContextMenuCommand.bind(this)))
this.disposables.add(this.applicationDelegate.onURIMessage(this.dispatchURIMessage.bind(this)))
this.disposables.add(this.applicationDelegate.onDidRequestUnload(this.prepareToUnloadEditorWindow.bind(this)))
this.listenForUpdates()
this.registerDefaultTargetForKeymaps()
this.packages.loadPackages()
const startTime = Date.now()
await this.deserialize(state)
this.deserializeTimings.atom = Date.now() - startTime
if (process.platform === 'darwin' && this.config.get('core.titleBar') === 'custom') {
this.workspace.addHeaderPanel({item: new TitleBar({workspace: this.workspace, themes: this.themes, applicationDelegate: this.applicationDelegate})})
this.document.body.classList.add('custom-title-bar')
}
if (process.platform === 'darwin' && this.config.get('core.titleBar') === 'custom-inset') {
this.workspace.addHeaderPanel({item: new TitleBar({workspace: this.workspace, themes: this.themes, applicationDelegate: this.applicationDelegate})})
this.document.body.classList.add('custom-inset-title-bar')
}
if (process.platform === 'darwin' && this.config.get('core.titleBar') === 'hidden') {
this.document.body.classList.add('hidden-title-bar')
}
this.document.body.appendChild(this.workspace.getElement())
if (this.backgroundStylesheet) this.backgroundStylesheet.remove()
let previousProjectPaths = this.project.getPaths()
this.disposables.add(this.project.onDidChangePaths(newPaths => {
for (let path of previousProjectPaths) {
if (this.pathsWithWaitSessions.has(path) && !newPaths.includes(path)) {
this.applicationDelegate.didClosePathWithWaitSession(path)
}
}
previousProjectPaths = newPaths
this.applicationDelegate.setProjectRoots(newPaths)
}))
this.disposables.add(this.workspace.onDidDestroyPaneItem(({item}) => {
const path = item.getPath && item.getPath()
if (this.pathsWithWaitSessions.has(path)) {
this.applicationDelegate.didClosePathWithWaitSession(path)
}
}))
this.packages.activate()
this.keymaps.loadUserKeymap()
if (!this.getLoadSettings().safeMode) this.requireUserInitScript()
this.menu.update()
await this.openInitialEmptyEditorIfNecessary()
})
const loadHistoryPromise = this.history.loadState().then(() => {
this.reopenProjectMenuManager = new ReopenProjectMenuManager({
menu: this.menu,
commands: this.commands,
history: this.history,
config: this.config,
open: paths => this.open({pathsToOpen: paths})
})
this.reopenProjectMenuManager.update()
})
return Promise.all([loadStatePromise, loadHistoryPromise, updateProcessEnvPromise])
}
serialize (options) {
return {
version: this.constructor.version,
project: this.project.serialize(options),
workspace: this.workspace.serialize(),
packageStates: this.packages.serialize(),
grammars: this.grammars.serialize(),
fullScreen: this.isFullScreen(),
windowDimensions: this.windowDimensions
}
}
async prepareToUnloadEditorWindow () {
try {
await this.saveState({isUnloading: true})
} catch (error) {
console.error(error)
}
const closing = !this.workspace || await this.workspace.confirmClose({
windowCloseRequested: true,
projectHasPaths: this.project.getPaths().length > 0
})
if (closing) {
this.unloading = true
await this.packages.deactivatePackages()
}
return closing
}
unloadEditorWindow () {
if (!this.project) return
this.storeWindowBackground()
this.saveBlobStoreSync()
}
saveBlobStoreSync () {
if (this.enablePersistence) {
this.blobStore.save()
}
}
openInitialEmptyEditorIfNecessary () {
if (!this.config.get('core.openEmptyEditorOnStart')) return
const {hasOpenFiles} = this.getLoadSettings()
if (!hasOpenFiles && this.workspace.getPaneItems().length === 0) {
return this.workspace.open(null, {pending: true})
}
}
installUncaughtErrorHandler () {
this.previousWindowErrorHandler = this.window.onerror
this.window.onerror = (message, url, line, column, originalError) => {
const mapping = mapSourcePosition({source: url, line, column})
line = mapping.line
column = mapping.column
if (url === '<embedded>') url = mapping.source
const eventObject = {message, url, line, column, originalError}
let openDevTools = true
eventObject.preventDefault = () => { openDevTools = false }
this.emitter.emit('will-throw-error', eventObject)
if (openDevTools) {
this.openDevTools().then(() =>
this.executeJavaScriptInDevTools('DevToolsAPI.showPanel("console")')
)
}
this.emitter.emit('did-throw-error', {message, url, line, column, originalError})
}
}
uninstallUncaughtErrorHandler () {
this.window.onerror = this.previousWindowErrorHandler
}
installWindowEventHandler () {
this.windowEventHandler = new WindowEventHandler({atomEnvironment: this, applicationDelegate: this.applicationDelegate})
this.windowEventHandler.initialize(this.window, this.document)
}
uninstallWindowEventHandler () {
if (this.windowEventHandler) {
this.windowEventHandler.unsubscribe()
}
this.windowEventHandler = null
}
didChangeStyles (styleElement) {
TextEditor.didUpdateStyles()
if (styleElement.textContent.indexOf('scrollbar') >= 0) {
TextEditor.didUpdateScrollbarStyles()
}
}
async updateProcessEnvAndTriggerHooks () {
await this.updateProcessEnv(this.getLoadSettings().env)
this.shellEnvironmentLoaded = true
this.emitter.emit('loaded-shell-environment')
this.packages.triggerActivationHook('core:loaded-shell-environment')
}
/*
Section: Messaging the User
*/
// Essential: Visually and audibly trigger a beep.
beep () {
if (this.config.get('core.audioBeep')) this.applicationDelegate.playBeepSound()
this.emitter.emit('did-beep')
}
// Essential: A flexible way to open a dialog akin to an alert dialog.
//
// While both async and sync versions are provided, it is recommended to use the async version
// such that the renderer process is not blocked while the dialog box is open.
//
// The async version accepts the same options as Electron's `dialog.showMessageBox`.
// For convenience, it sets `type` to `'info'` and `normalizeAccessKeys` to `true` by default.
//
// If the dialog is closed (via `Esc` key or `X` in the top corner) without selecting a button
// the first button will be clicked unless a "Cancel" or "No" button is provided.
//
// ## Examples
//
// ```js
// // Async version (recommended)
// atom.confirm({
// message: 'How you feeling?',
// detail: 'Be honest.',
// buttons: ['Good', 'Bad']
// }, response => {
// if (response === 0) {
// window.alert('good to hear')
// } else {
// window.alert('bummer')
// }
// })
// ```
//
// ```js
// // Legacy sync version
// const chosen = atom.confirm({
// message: 'How you feeling?',
// detailedMessage: 'Be honest.',
// buttons: {
// Good: () => window.alert('good to hear'),
// Bad: () => window.alert('bummer')
// }
// })
// ```
//
// * `options` An options {Object}. If the callback argument is also supplied, see the documentation at
// https://electronjs.org/docs/api/dialog#dialogshowmessageboxbrowserwindow-options-callback for the list of
// available options. Otherwise, only the following keys are accepted:
// * `message` The {String} message to display.
// * `detailedMessage` (optional) The {String} detailed message to display.
// * `buttons` (optional) Either an {Array} of {String}s or an {Object} where keys are
// button names and the values are callback {Function}s to invoke when clicked.
// * `callback` (optional) A {Function} that will be called with the index of the chosen option.
// If a callback is supplied, the dialog will be non-blocking. This argument is recommended.
//
// Returns the chosen button index {Number} if the buttons option is an array
// or the return value of the callback if the buttons option is an object.
// If a callback function is supplied, returns `undefined`.
confirm (options = {}, callback) {
if (callback) {
// Async: no return value
this.applicationDelegate.confirm(options, callback)
} else {
return this.applicationDelegate.confirm(options)
}
}
/*
Section: Managing the Dev Tools
*/
// Extended: Open the dev tools for the current window.
//
// Returns a {Promise} that resolves when the DevTools have been opened.
openDevTools () {
return this.applicationDelegate.openWindowDevTools()
}
// Extended: Toggle the visibility of the dev tools for the current window.
//
// Returns a {Promise} that resolves when the DevTools have been opened or
// closed.
toggleDevTools () {
return this.applicationDelegate.toggleWindowDevTools()
}
// Extended: Execute code in dev tools.
executeJavaScriptInDevTools (code) {
return this.applicationDelegate.executeJavaScriptInWindowDevTools(code)
}
/*
Section: Private
*/
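  // Usage sketch (internal): this.assert(paths.length > 0, 'no project paths', {paths})
  // emits 'did-fail-assertion' with the constructed error and only throws when the
  // running build is not a released version.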
assert (condition, message, callbackOrMetadata) {
if (condition) return true
const error = new Error(`Assertion failed: ${message}`)
Error.captureStackTrace(error, this.assert)
if (callbackOrMetadata) {
if (typeof callbackOrMetadata === 'function') {
callbackOrMetadata(error)
} else {
error.metadata = callbackOrMetadata
}
}
this.emitter.emit('did-fail-assertion', error)
if (!this.isReleasedVersion()) throw error
return false
}
loadThemes () {
return this.themes.load()
}
setDocumentEdited (edited) {
if (typeof this.applicationDelegate.setWindowDocumentEdited === 'function') {
this.applicationDelegate.setWindowDocumentEdited(edited)
}
}
setRepresentedFilename (filename) {
if (typeof this.applicationDelegate.setWindowRepresentedFilename === 'function') {
this.applicationDelegate.setWindowRepresentedFilename(filename)
}
}
addProjectFolder () {
return new Promise((resolve) => {
this.pickFolder((selectedPaths) => {
this.addToProject(selectedPaths || []).then(resolve)
})
})
}
async addToProject (projectPaths) {
const state = await this.loadState(this.getStateKey(projectPaths))
if (state && (this.project.getPaths().length === 0)) {
this.attemptRestoreProjectStateForPaths(state, projectPaths)
} else {
projectPaths.map((folder) => this.project.addPath(folder))
}
}
async attemptRestoreProjectStateForPaths (state, projectPaths, filesToOpen = []) {
const center = this.workspace.getCenter()
const windowIsUnused = () => {
for (let container of this.workspace.getPaneContainers()) {
for (let item of container.getPaneItems()) {
if (item instanceof TextEditor) {
if (item.getPath() || item.isModified()) return false
} else {
if (container === center) return false
}
}
}
return true
}
if (windowIsUnused()) {
await this.restoreStateIntoThisEnvironment(state)
return Promise.all(filesToOpen.map(file => this.workspace.open(file)))
} else {
let resolveDiscardStatePromise = null
const discardStatePromise = new Promise((resolve) => {
resolveDiscardStatePromise = resolve
})
const nouns = projectPaths.length === 1 ? 'folder' : 'folders'
this.confirm({
message: 'Previous automatically-saved project state detected',
detail: `There is previously saved state for the selected ${nouns}. ` +
`Would you like to add the ${nouns} to this window, permanently discarding the saved state, ` +
`or open the ${nouns} in a new window, restoring the saved state?`,
buttons: [
'&Open in new window and recover state',
'&Add to this window and discard state'
]
}, response => {
if (response === 0) {
this.open({
pathsToOpen: projectPaths.concat(filesToOpen),
newWindow: true,
devMode: this.inDevMode(),
safeMode: this.inSafeMode()
})
resolveDiscardStatePromise(Promise.resolve(null))
} else if (response === 1) {
for (let selectedPath of projectPaths) {
this.project.addPath(selectedPath)
}
resolveDiscardStatePromise(Promise.all(filesToOpen.map(file => this.workspace.open(file))))
}
})
return discardStatePromise
}
}
restoreStateIntoThisEnvironment (state) {
state.fullScreen = this.isFullScreen()
for (let pane of this.workspace.getPanes()) {
pane.destroy()
}
return this.deserialize(state)
}
showSaveDialogSync (options = {}) {
deprecate(`atom.showSaveDialogSync is deprecated and will be removed soon.
Please, implement ::saveAs and ::getSaveDialogOptions instead for pane items
or use Pane::saveItemAs for programmatic saving.`)
return this.applicationDelegate.showSaveDialog(options)
}
async saveState (options, storageKey) {
if (this.enablePersistence && this.project) {
const state = this.serialize(options)
if (!storageKey) storageKey = this.getStateKey(this.project && this.project.getPaths())
if (storageKey) {
await this.stateStore.save(storageKey, state)
} else {
await this.applicationDelegate.setTemporaryWindowState(state)
}
}
}
loadState (stateKey) {
if (this.enablePersistence) {
if (!stateKey) stateKey = this.getStateKey(this.getLoadSettings().initialProjectRoots)
if (stateKey) {
return this.stateStore.load(stateKey)
} else {
return this.applicationDelegate.getTemporaryWindowState()
}
} else {
return Promise.resolve(null)
}
}
async deserialize (state) {
if (!state) return Promise.resolve()
this.setFullScreen(state.fullScreen)
const missingProjectPaths = []
this.packages.packageStates = state.packageStates || {}
let startTime = Date.now()
if (state.project) {
try {
await this.project.deserialize(state.project, this.deserializers)
} catch (error) {
// We handle the missingProjectPaths case in openLocations().
if (!error.missingProjectPaths) {
this.notifications.addError('Unable to deserialize project', {
description: error.message,
stack: error.stack
})
}
}
}
this.deserializeTimings.project = Date.now() - startTime
if (state.grammars) this.grammars.deserialize(state.grammars)
startTime = Date.now()
if (state.workspace) this.workspace.deserialize(state.workspace, this.deserializers)
this.deserializeTimings.workspace = Date.now() - startTime
if (missingProjectPaths.length > 0) {
const count = missingProjectPaths.length === 1 ? '' : missingProjectPaths.length + ' '
const noun = missingProjectPaths.length === 1 ? 'folder' : 'folders'
const toBe = missingProjectPaths.length === 1 ? 'is' : 'are'
const escaped = missingProjectPaths.map(projectPath => `\`${projectPath}\``)
let group
switch (escaped.length) {
case 1:
group = escaped[0]
break
case 2:
group = `${escaped[0]} and ${escaped[1]}`
break
default:
group = escaped.slice(0, -1).join(', ') + `, and ${escaped[escaped.length - 1]}`
}
this.notifications.addError(`Unable to open ${count}project ${noun}`, {
description: `Project ${noun} ${group} ${toBe} no longer on disk.`
})
}
}
getStateKey (paths) {
if (paths && paths.length > 0) {
const sha1 = crypto.createHash('sha1').update(paths.slice().sort().join('\n')).digest('hex')
return `editor-${sha1}`
} else {
return null
}
}
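// A minimal sketch of the key derivation above (the paths are hypothetical); the
// result is stable regardless of ordering because the paths are sorted before hashing:
//
//   const paths = ['/tmp/project-b', '/tmp/project-a']
//   const sha1 = crypto.createHash('sha1').update(paths.slice().sort().join('\n')).digest('hex')
//   const stateKey = `editor-${sha1}` // 'editor-' followed by 40 hex characters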
getConfigDirPath () {
if (!this.configDirPath) this.configDirPath = process.env.ATOM_HOME
return this.configDirPath
}
getUserInitScriptPath () {
const initScriptPath = fs.resolve(this.getConfigDirPath(), 'init', ['js', 'coffee'])
return initScriptPath || path.join(this.getConfigDirPath(), 'init.coffee')
}
requireUserInitScript () {
const userInitScriptPath = this.getUserInitScriptPath()
if (userInitScriptPath) {
try {
if (fs.isFileSync(userInitScriptPath)) require(userInitScriptPath)
} catch (error) {
this.notifications.addError(`Failed to load \`${userInitScriptPath}\``, {
detail: error.message,
dismissable: true
})
}
}
}
// TODO: We should deprecate the update events here, and use `atom.autoUpdater` instead
onUpdateAvailable (callback) {
return this.emitter.on('update-available', callback)
}
updateAvailable (details) {
return this.emitter.emit('update-available', details)
}
listenForUpdates () {
// listen for updates available locally (that have been successfully downloaded)
this.disposables.add(this.autoUpdater.onDidCompleteDownloadingUpdate(this.updateAvailable.bind(this)))
}
setBodyPlatformClass () {
this.document.body.classList.add(`platform-${process.platform}`)
}
setAutoHideMenuBar (autoHide) {
this.applicationDelegate.setAutoHideWindowMenuBar(autoHide)
this.applicationDelegate.setWindowMenuBarVisibility(!autoHide)
}
dispatchApplicationMenuCommand (command, arg) {
let {activeElement} = this.document
// Use the workspace element if body has focus
if (activeElement === this.document.body) {
activeElement = this.workspace.getElement()
}
this.commands.dispatch(activeElement, command, arg)
}
dispatchContextMenuCommand (command, ...args) {
this.commands.dispatch(this.contextMenu.activeElement, command, args)
}
dispatchURIMessage (uri) {
if (this.packages.hasLoadedInitialPackages()) {
this.uriHandlerRegistry.handleURI(uri)
} else {
let subscription = this.packages.onDidLoadInitialPackages(() => {
subscription.dispose()
this.uriHandlerRegistry.handleURI(uri)
})
}
}
async openLocations (locations) {
const needsProjectPaths = this.project && this.project.getPaths().length === 0
const foldersToAddToProject = new Set()
const fileLocationsToOpen = []
const missingFolders = []
// Asynchronously fetch stat information about each requested path to open.
const locationStats = await Promise.all(
locations.map(async location => {
const stats = location.pathToOpen ? await stat(location.pathToOpen).catch(() => null) : null
return {location, stats}
}),
)
for (const {location, stats} of locationStats) {
const {pathToOpen} = location
if (!pathToOpen) {
// Untitled buffer
fileLocationsToOpen.push(location)
continue
}
if (stats !== null) {
// Path exists
if (stats.isDirectory()) {
// Directory: add as a project folder
foldersToAddToProject.add(this.project.getDirectoryForProjectPath(pathToOpen).getPath())
} else if (stats.isFile()) {
if (location.isDirectory) {
// File: no longer a directory
missingFolders.push(location)
} else {
// File: add as a file location
fileLocationsToOpen.push(location)
}
}
} else {
// Path does not exist
// Attempt to interpret as a URI from a non-default directory provider
const directory = this.project.getProvidedDirectoryForProjectPath(pathToOpen)
if (directory) {
// Found: add as a project folder
foldersToAddToProject.add(directory.getPath())
} else if (location.isDirectory) {
// Not found and must be a directory: add to missing list and use to derive state key
missingFolders.push(location)
} else {
// Not found: open as a new file
fileLocationsToOpen.push(location)
}
}
if (location.hasWaitSession) this.pathsWithWaitSessions.add(pathToOpen)
}
let restoredState = false
if (foldersToAddToProject.size > 0 || missingFolders.length > 0) {
// Include missing folders in the state key so that sessions restored with no-longer-present project root folders
// don't lose data.
const foldersForStateKey = Array.from(foldersToAddToProject)
.concat(missingFolders.map(location => location.pathToOpen))
const state = await this.loadState(this.getStateKey(Array.from(foldersForStateKey)))
// only restore state if this is the first path added to the project
if (state && needsProjectPaths) {
const files = fileLocationsToOpen.map((location) => location.pathToOpen)
await this.attemptRestoreProjectStateForPaths(state, Array.from(foldersToAddToProject), files)
restoredState = true
} else {
for (let folder of foldersToAddToProject) {
this.project.addPath(folder)
}
}
}
if (!restoredState) {
const fileOpenPromises = []
for (const {pathToOpen, initialLine, initialColumn} of fileLocationsToOpen) {
fileOpenPromises.push(this.workspace && this.workspace.open(pathToOpen, {initialLine, initialColumn}))
}
await Promise.all(fileOpenPromises)
}
if (missingFolders.length > 0) {
let message = 'Unable to open project folder'
if (missingFolders.length > 1) {
message += 's'
}
let description = 'The '
if (missingFolders.length === 1) {
description += 'directory `'
description += missingFolders[0].pathToOpen
description += '` does not exist.'
} else if (missingFolders.length === 2) {
description += `directories \`${missingFolders[0].pathToOpen}\` `
description += `and \`${missingFolders[1].pathToOpen}\` do not exist.`
} else {
description += 'directories '
description += (missingFolders
.slice(0, -1)
.map(location => location.pathToOpen)
.map(pathToOpen => '`' + pathToOpen + '`, ')
.join(''))
description += 'and `' + missingFolders[missingFolders.length - 1].pathToOpen + '` do not exist.'
}
this.notifications.addWarning(message, {description})
}
ipcRenderer.send('window-command', 'window:locations-opened')
}
resolveProxy (url) {
return new Promise((resolve, reject) => {
const requestId = this.nextProxyRequestId++
const disposable = this.applicationDelegate.onDidResolveProxy((id, proxy) => {
if (id === requestId) {
disposable.dispose()
resolve(proxy)
}
})
return this.applicationDelegate.resolveProxy(requestId, url)
})
}
}
AtomEnvironment.version = 1
AtomEnvironment.prototype.saveStateDebounceInterval = 1000
module.exports = AtomEnvironment
/* eslint-disable */
// Preserve this deprecation until 2.0. Sorry. Should have removed Q sooner.
Promise.prototype.done = function (callback) {
deprecate('Atom now uses ES6 Promises instead of Q. Call promise.then instead of promise.done')
return this.then(callback)
}
/* eslint-enable */
|
// Copyright (C) 2011-2015 Internet Systems Consortium, Inc. ("ISC")
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef MESSAGE_READER_H
#define MESSAGE_READER_H
#include <map>
#include <string>
#include <vector>
#include <log/message_dictionary.h>
#include <log/message_types.h>
namespace isc {
namespace log {
/// \brief Read Message File
///
/// Reads a message file and creates a map of identifier against the text of the
/// message. This map can be retrieved for subsequent processing.
class MessageReader {
public:
/// \brief Read Mode
///
/// If ADD, messages are added to the dictionary if the ID does not exist
/// there. If it does, the ID is added to the dictionary's overflow
/// vector.
///
/// If REPLACE, the dictionary is only modified if the message ID already
/// exists in it. New message IDs are added to the overflow vector.
typedef enum {
ADD,
REPLACE
} Mode;
/// \brief Visible collection types
typedef std::vector<std::string> MessageIDCollection;
/// \brief Constructor
///
/// Default constructor. All work is done in the main readFile code (so
/// that a status can be returned instead of needing to throw an
/// exception).
///
/// \param dictionary Dictionary to which messages read from the file
/// are added. (This should be a local dictionary when the class is used in
/// the message compiler, and the global dictionary when used in a server.)
/// The ownership of the dictionary object is not transferred - the caller
/// is responsible for managing the lifetime of the dictionary.
MessageReader(MessageDictionary* dictionary = NULL) :
dictionary_(dictionary), lineno_(0)
{}
/// \brief Virtual Destructor
virtual ~MessageReader()
{}
/// \brief Get Dictionary
///
/// Returns the pointer to the dictionary object. Note that ownership is
/// not transferred - the caller should not delete it.
///
/// \return Pointer to current dictionary object
MessageDictionary* getDictionary() const {
return (dictionary_);
}
/// \brief Set Dictionary
///
/// Sets the current dictionary object.
///
/// \param dictionary New dictionary object. The ownership of the dictionary
/// object is not transferred - the caller is responsible for managing the
/// lifetime of the dictionary.
void setDictionary(MessageDictionary* dictionary) {
dictionary_ = dictionary;
}
/// \brief Read File
///
/// This is the main method of the class and reads in the file, parses it,
/// and stores the result in the message dictionary.
///
/// \param file Name of the message file.
/// \param mode Addition mode. See the description of the "Mode" enum.
virtual void readFile(const std::string& file, Mode mode = ADD);
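// A minimal usage sketch of readFile()/getNotAdded() (the file name is
// hypothetical); in ADD mode, IDs that already exist end up in the
// not-added list:
//
//     MessageDictionary dictionary;
//     MessageReader reader(&dictionary);
//     reader.readFile("example.mes", MessageReader::ADD);
//     MessageReader::MessageIDCollection duplicates = reader.getNotAdded();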
/// \brief Process Line
///
/// Parses a text line and adds it to the message map. Although this is
/// for use in readFile, it can also be used to add individual messages
/// to the message map.
///
/// \param line Line of text to process
/// \param mode If a message line, how to add the message to the dictionary.
virtual void processLine(const std::string& line, Mode mode = ADD);
/// \brief Get Namespace
///
/// \return Argument to the $NAMESPACE directive (if present)
virtual std::string getNamespace() const {
return (ns_);
}
/// \brief Clear Namespace
///
/// Clears the current namespace.
virtual void clearNamespace() {
ns_ = "";
}
/// \brief Get Prefix
///
/// \return Argument to the $PREFIX directive (if present)
virtual std::string getPrefix() const {
return (prefix_);
}
/// \brief Clear Prefix
///
/// Clears the current prefix.
virtual void clearPrefix() {
prefix_ = "";
}
/// \brief Get Not-Added List
///
/// Returns the list of IDs that were not added during the last
/// read of the file.
///
/// \return Collection of messages not added
MessageIDCollection getNotAdded() const {
return (not_added_);
}
private:
/// \brief Handle a Message Definition
///
/// Passed a line that should contain a message, this processes that line
/// and adds it to the dictionary according to the mode setting.
///
/// \param line Line of text
/// \param mode ADD or REPLACE depending on how the reader is operating. (See
/// the description of the Mode typedef for details.)
void parseMessage(const std::string& line, Mode mode);
/// \brief Handle Directive
///
/// Passed a line starting with a "$", this handles the processing of
/// directives.
///
/// \param line Line of text that starts with "$".
void parseDirective(const std::string& line);
/// \brief Parse $PREFIX line
///
/// \param tokens $PREFIX line split into tokens
void parsePrefix(const std::vector<std::string>& tokens);
/// \brief Parse $NAMESPACE line
///
/// \param tokens $NAMESPACE line split into tokens
void parseNamespace(const std::vector<std::string>& tokens);
/// \brief Check for invalid C++ symbol name
///
/// The message ID (or concatenation of prefix and message ID) will be used
/// as the name of a symbol in C++ code. This function checks if the name
/// is invalid (contains anything other than alphanumeric characters or
/// underscores, or starts with a digit).
///
/// \param symbol name to check to see if it is an invalid C++ symbol.
///
/// \return true if the name is invalid, false if it is valid.
bool invalidSymbol(const std::string& symbol);
/// Attributes
MessageDictionary* dictionary_; ///< Dictionary to add messages to
MessageIDCollection not_added_; ///< List of IDs not added
int lineno_; ///< Number of last line read
std::string prefix_; ///< Argument of $PREFIX statement
std::string ns_; ///< Argument of $NAMESPACE statement
};
} // namespace log
} // namespace isc
#endif // MESSAGE_READER_H
|
"""Checkout-related utility functions."""
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union, cast
import graphene
from django.core.exceptions import ValidationError
from django.utils import timezone
from prices import Money
from ..account.models import User
from ..core.exceptions import ProductNotPublished
from ..core.taxes import zero_taxed_money
from ..core.utils.promo_code import (
InvalidPromoCode,
promo_code_is_gift_card,
promo_code_is_voucher,
)
from ..discount import DiscountInfo, VoucherType
from ..discount.models import NotApplicable, Voucher
from ..discount.utils import (
get_products_voucher_discount,
validate_voucher_for_checkout,
)
from ..giftcard.utils import (
add_gift_card_code_to_checkout,
remove_gift_card_code_from_checkout,
)
from ..plugins.manager import PluginsManager
from ..product import models as product_models
from ..shipping.models import ShippingMethod
from ..warehouse.availability import (
check_stock_and_preorder_quantity,
check_stock_and_preorder_quantity_bulk,
)
from ..warehouse.models import Warehouse
from ..warehouse.reservations import reserve_stocks_and_preorders
from . import AddressType, calculations
from .error_codes import CheckoutErrorCode
from .fetch import (
update_checkout_info_delivery_method,
update_checkout_info_shipping_address,
)
from .models import Checkout, CheckoutLine
if TYPE_CHECKING:
# flake8: noqa
from prices import TaxedMoney
from ..account.models import Address
from ..order.models import Order
from .fetch import CheckoutInfo, CheckoutLineInfo
PRIVATE_META_APP_SHIPPING_ID = "external_app_shipping_id"
def get_user_checkout(
user: User, checkout_queryset=Checkout.objects.all()
) -> Optional[Checkout]:
return checkout_queryset.filter(user=user, channel__is_active=True).first()
def check_variant_in_stock(
checkout: Checkout,
variant: product_models.ProductVariant,
channel_slug: str,
quantity: int = 1,
replace: bool = False,
check_quantity: bool = True,
checkout_lines: Optional[List["CheckoutLine"]] = None,
check_reservations: bool = False,
) -> Tuple[int, Optional[CheckoutLine]]:
"""Check if a given variant is in stock and return the new quantity + line."""
line = checkout.lines.filter(variant=variant).first()
line_quantity = 0 if line is None else line.quantity
new_quantity = quantity if replace else (quantity + line_quantity)
if new_quantity < 0:
raise ValueError(
"%r is not a valid quantity (results in %r)" % (quantity, new_quantity)
)
if new_quantity > 0 and check_quantity:
check_stock_and_preorder_quantity(
variant,
checkout.get_country(),
channel_slug,
new_quantity,
checkout_lines,
check_reservations,
)
return new_quantity, line
def add_variant_to_checkout(
checkout_info: "CheckoutInfo",
variant: product_models.ProductVariant,
quantity: int = 1,
replace: bool = False,
check_quantity: bool = True,
):
"""Add a product variant to checkout.
If `replace` is truthy, any previous quantity is discarded instead of
being added to.
This function is not used outside of the test suite.
"""
checkout = checkout_info.checkout
channel_slug = checkout_info.channel.slug
product_channel_listing = product_models.ProductChannelListing.objects.filter(
channel_id=checkout.channel_id, product_id=variant.product_id
).first()
if not product_channel_listing or not product_channel_listing.is_published:
raise ProductNotPublished()
new_quantity, line = check_variant_in_stock(
checkout,
variant,
channel_slug,
quantity=quantity,
replace=replace,
check_quantity=check_quantity,
)
if line is None:
line = checkout.lines.filter(variant=variant).first()
if new_quantity == 0:
if line is not None:
line.delete()
line = None
elif line is None:
line = checkout.lines.create(
checkout=checkout, variant=variant, quantity=new_quantity
)
elif new_quantity > 0:
line.quantity = new_quantity
line.save(update_fields=["quantity"])
return checkout
def calculate_checkout_quantity(lines: Iterable["CheckoutLineInfo"]):
return sum([line_info.line.quantity for line_info in lines])
def add_variants_to_checkout(
checkout,
variants,
quantities,
channel_slug,
replace=False,
replace_reservations=False,
reservation_length: Optional[int] = None,
):
"""Add variants to checkout.
If a variant is not already in the checkout, a new checkout line will be created.
If quantity is set to 0, the checkout line will be deleted.
Otherwise, the quantity will be added to the existing line, or will replace it
if the replace argument is True.
"""
country_code = checkout.get_country()
checkout_lines = checkout.lines.select_related("variant")
variant_ids_in_lines = {line.variant_id: line for line in checkout_lines}
to_create = []
to_update = []
to_delete = []
for variant, quantity in zip(variants, quantities):
if variant.pk in variant_ids_in_lines:
line = variant_ids_in_lines[variant.pk]
if quantity > 0:
if replace:
line.quantity = quantity
else:
line.quantity += quantity
to_update.append(line)
else:
to_delete.append(line)
elif quantity > 0:
to_create.append(
CheckoutLine(checkout=checkout, variant=variant, quantity=quantity)
)
if to_delete:
CheckoutLine.objects.filter(pk__in=[line.pk for line in to_delete]).delete()
if to_update:
CheckoutLine.objects.bulk_update(to_update, ["quantity"])
if to_create:
CheckoutLine.objects.bulk_create(to_create)
to_reserve = to_create + to_update
if reservation_length and to_reserve:
updated_lines_ids = [line.pk for line in to_reserve + to_delete]
for line in checkout_lines:
if line.pk not in updated_lines_ids:
to_reserve.append(line)
variants.append(line.variant)
reserve_stocks_and_preorders(
to_reserve,
variants,
country_code,
channel_slug,
reservation_length,
replace=replace_reservations,
)
return checkout
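# A minimal usage sketch of add_variants_to_checkout (the objects and slug are
# hypothetical); quantities are matched to variants positionally and a
# quantity of 0 deletes the corresponding line:
#
#   add_variants_to_checkout(
#       checkout,
#       variants=[variant_a, variant_b],
#       quantities=[2, 0],
#       channel_slug="default-channel",
#       replace=True,  # overwrite existing quantities instead of adding to them
#   )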
def _check_new_checkout_address(checkout, address, address_type):
"""Check if and address in checkout has changed and if to remove old one."""
if address_type == AddressType.BILLING:
old_address = checkout.billing_address
else:
old_address = checkout.shipping_address
has_address_changed = any(
[
not address and old_address,
address and not old_address,
address and old_address and address != old_address,
]
)
remove_old_address = (
has_address_changed
and old_address is not None
and (not checkout.user or old_address not in checkout.user.addresses.all())
)
return has_address_changed, remove_old_address
def change_billing_address_in_checkout(checkout, address):
"""Save billing address in checkout if changed.
Remove previously saved address if not connected to any user.
"""
changed, remove = _check_new_checkout_address(
checkout, address, AddressType.BILLING
)
if changed:
if remove:
checkout.billing_address.delete()
checkout.billing_address = address
checkout.save(update_fields=["billing_address", "last_change"])
def change_shipping_address_in_checkout(
checkout_info: "CheckoutInfo",
address: "Address",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
manager: "PluginsManager",
):
"""Save shipping address in checkout if changed.
Remove previously saved address if not connected to any user.
"""
checkout = checkout_info.checkout
changed, remove = _check_new_checkout_address(
checkout, address, AddressType.SHIPPING
)
if changed:
if remove:
checkout.shipping_address.delete() # type: ignore
checkout.shipping_address = address
update_checkout_info_shipping_address(
checkout_info, address, lines, discounts, manager
)
checkout.save(update_fields=["shipping_address", "last_change"])
def _get_shipping_voucher_discount_for_checkout(
manager: PluginsManager,
voucher: Voucher,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Calculate discount value for a voucher of shipping type."""
if not is_shipping_required(lines):
msg = "Your order does not require shipping."
raise NotApplicable(msg)
shipping_method = checkout_info.delivery_method_info.delivery_method
if not shipping_method:
msg = "Please select a delivery method first."
raise NotApplicable(msg)
# check if voucher is limited to specified countries
if address:
if voucher.countries and address.country.code not in voucher.countries:
msg = "This offer is not valid in your country."
raise NotApplicable(msg)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
return voucher.get_discount_amount_for(shipping_price, checkout_info.channel)
def _get_products_voucher_discount(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Calculate products discount value for a voucher, depending on its type."""
prices = None
if voucher.type == VoucherType.SPECIFIC_PRODUCT:
prices = get_prices_of_discounted_specific_product(
manager, checkout_info, lines, voucher, discounts
)
if not prices:
msg = "This offer is only valid for selected items."
raise NotApplicable(msg)
return get_products_voucher_discount(voucher, prices, checkout_info.channel)
def get_discounted_lines(
lines: Iterable["CheckoutLineInfo"], voucher
) -> Iterable["CheckoutLineInfo"]:
discounted_variants = voucher.variants.all()
discounted_products = voucher.products.all()
discounted_categories = set(voucher.categories.all())
discounted_collections = set(voucher.collections.all())
discounted_lines = []
if (
discounted_products
or discounted_collections
or discounted_categories
or discounted_variants
):
for line_info in lines:
line_variant = line_info.variant
line_product = line_info.product
line_category = line_info.product.category
line_collections = set(line_info.collections)
if line_info.variant and (
line_variant in discounted_variants
or line_product in discounted_products
or line_category in discounted_categories
or line_collections.intersection(discounted_collections)
):
discounted_lines.append(line_info)
else:
# If there are no discounted variants, products, collections or categories,
# it means that all products are discounted.
discounted_lines.extend(lines)
return discounted_lines
def get_prices_of_discounted_specific_product(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher: Voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> List[Money]:
"""Get prices of variants belonging to the discounted specific products.
Specific products are products, collections and categories.
Product must be assigned directly to the discounted category, assigning
product to child category won't work.
"""
line_prices = []
discounted_lines: Iterable["CheckoutLineInfo"] = get_discounted_lines(
lines, voucher
)
address = checkout_info.shipping_address or checkout_info.billing_address
discounts = discounts or []
for line_info in discounted_lines:
line = line_info.line
line_unit_price = manager.calculate_checkout_line_unit_price(
checkout_info,
lines,
line_info,
address,
discounts,
).price_with_sale.gross
line_prices.extend([line_unit_price] * line.quantity)
return line_prices
def get_voucher_discount_for_checkout(
manager: PluginsManager,
voucher: Voucher,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> Money:
"""Calculate discount value depending on voucher and discount types.
Raise NotApplicable if voucher of given type cannot be applied.
"""
validate_voucher_for_checkout(manager, voucher, checkout_info, lines, discounts)
if voucher.type == VoucherType.ENTIRE_ORDER:
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
return voucher.get_discount_amount_for(subtotal, checkout_info.channel)
if voucher.type == VoucherType.SHIPPING:
return _get_shipping_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
if voucher.type == VoucherType.SPECIFIC_PRODUCT:
return _get_products_voucher_discount(
manager, checkout_info, lines, voucher, discounts
)
raise NotImplementedError("Unknown discount type")
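# A hypothetical call sketch: the returned Money is the raw voucher discount;
# recalculate_checkout_discount() below clamps it to the subtotal for
# non-shipping vouchers, and NotApplicable is raised if it cannot be applied.
#
#   discount = get_voucher_discount_for_checkout(
#       manager, voucher, checkout_info, lines, address, discounts
#   )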
def get_voucher_for_checkout(
checkout: "Checkout",
channel_slug: str,
with_lock: bool = False,
with_prefetch: bool = False,
) -> Optional[Voucher]:
"""Return voucher assigned to checkout."""
if checkout.voucher_code is not None:
vouchers = Voucher.objects
vouchers = vouchers.active_in_channel(
date=timezone.now(), channel_slug=channel_slug
)
if with_prefetch:
vouchers = vouchers.prefetch_related(
"products", "collections", "categories", "variants", "channel_listings"
)
try:
qs = vouchers
voucher = qs.get(code=checkout.voucher_code)
if voucher and voucher.usage_limit is not None and with_lock:
voucher = vouchers.select_for_update().get(code=checkout.voucher_code)
return voucher
except Voucher.DoesNotExist:
return None
return None
def get_voucher_for_checkout_info(
checkout_info: "CheckoutInfo", with_lock: bool = False, with_prefetch: bool = False
) -> Optional[Voucher]:
"""Return voucher with voucher code saved in checkout if active or None."""
checkout = checkout_info.checkout
return get_voucher_for_checkout(
checkout,
channel_slug=checkout_info.channel.slug,
with_lock=with_lock,
with_prefetch=with_prefetch,
)
def recalculate_checkout_discount(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
):
"""Recalculate `checkout.discount` based on the voucher.
Will clear both voucher and discount if the discount is no longer
applicable.
"""
checkout = checkout_info.checkout
voucher = get_voucher_for_checkout_info(checkout_info)
if voucher is not None:
address = checkout_info.shipping_address or checkout_info.billing_address
try:
discount = get_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
except NotApplicable:
remove_voucher_from_checkout(checkout)
else:
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
checkout.discount = (
min(discount, subtotal)
if voucher.type != VoucherType.SHIPPING
else discount
)
checkout.discount_name = voucher.name
checkout.translated_discount_name = (
voucher.translated.name
if voucher.translated.name != voucher.name
else ""
)
checkout.save(
update_fields=[
"translated_discount_name",
"discount_amount",
"discount_name",
"currency",
"last_change",
]
)
else:
remove_voucher_from_checkout(checkout)
def add_promo_code_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
promo_code: str,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add gift card or voucher data to checkout.
Raise InvalidPromoCode if promo code does not match to any voucher or gift card.
"""
if promo_code_is_voucher(promo_code):
add_voucher_code_to_checkout(
manager, checkout_info, lines, promo_code, discounts
)
elif promo_code_is_gift_card(promo_code):
user_email = cast(str, checkout_info.get_customer_email())
add_gift_card_code_to_checkout(
checkout_info.checkout,
user_email,
promo_code,
checkout_info.channel.currency_code,
)
else:
raise InvalidPromoCode()
def add_voucher_code_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher_code: str,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add voucher data to checkout by code.
Raise InvalidPromoCode() if voucher of given type cannot be applied.
"""
try:
voucher = Voucher.objects.active_in_channel(
date=timezone.now(), channel_slug=checkout_info.channel.slug
).get(code=voucher_code)
except Voucher.DoesNotExist:
raise InvalidPromoCode()
try:
add_voucher_to_checkout(manager, checkout_info, lines, voucher, discounts)
except NotApplicable:
raise ValidationError(
{
"promo_code": ValidationError(
"Voucher is not applicable to that checkout.",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE.value,
)
}
)
def add_voucher_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher: Voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add voucher data to checkout.
Raise NotApplicable if voucher of given type cannot be applied.
"""
checkout = checkout_info.checkout
address = checkout_info.shipping_address or checkout_info.billing_address
discount = get_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
checkout.voucher_code = voucher.code
checkout.discount_name = voucher.name
checkout.translated_discount_name = (
voucher.translated.name if voucher.translated.name != voucher.name else ""
)
checkout.discount = discount
checkout.save(
update_fields=[
"voucher_code",
"discount_name",
"translated_discount_name",
"discount_amount",
"last_change",
]
)
def remove_promo_code_from_checkout(checkout_info: "CheckoutInfo", promo_code: str):
"""Remove gift card or voucher data from checkout."""
if promo_code_is_voucher(promo_code):
remove_voucher_code_from_checkout(checkout_info, promo_code)
elif promo_code_is_gift_card(promo_code):
remove_gift_card_code_from_checkout(checkout_info.checkout, promo_code)
def remove_voucher_code_from_checkout(checkout_info: "CheckoutInfo", voucher_code: str):
"""Remove voucher data from checkout by code."""
existing_voucher = get_voucher_for_checkout_info(checkout_info)
if existing_voucher and existing_voucher.code == voucher_code:
remove_voucher_from_checkout(checkout_info.checkout)
def remove_voucher_from_checkout(checkout: Checkout):
"""Remove voucher data from checkout."""
checkout.voucher_code = None
checkout.discount_name = None
checkout.translated_discount_name = None
checkout.discount_amount = 0
checkout.save(
update_fields=[
"voucher_code",
"discount_name",
"translated_discount_name",
"discount_amount",
"currency",
"last_change",
]
)
def get_valid_shipping_methods_for_checkout(
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
subtotal: "TaxedMoney",
country_code: Optional[str] = None,
):
if not is_shipping_required(lines):
return None
if not checkout_info.shipping_address:
return None
return ShippingMethod.objects.applicable_shipping_methods_for_instance(
checkout_info.checkout,
channel_id=checkout_info.checkout.channel_id,
price=subtotal.gross,
country_code=country_code, # type: ignore
lines=lines,
)
def get_valid_collection_points_for_checkout(
lines: Iterable["CheckoutLineInfo"],
country_code: Optional[str] = None,
quantity_check: bool = True,
):
"""Return a collection of `Warehouse`s that can be used as a collection point.
Note that `quantity_check=False` should be used when stock quantities will be
validated in further steps (checkout completion), in order to raise an
'InsufficientProductStock' error instead of an 'InvalidShippingError'.
"""
if not is_shipping_required(lines):
return []
if not country_code:
return []
line_ids = [line_info.line.id for line_info in lines]
lines = CheckoutLine.objects.filter(id__in=line_ids)
return (
Warehouse.objects.applicable_for_click_and_collect(lines, country_code)
if quantity_check
else Warehouse.objects.applicable_for_click_and_collect_no_quantity_check(
lines, country_code
)
)
def clear_delivery_method(checkout_info: "CheckoutInfo"):
checkout = checkout_info.checkout
checkout.collection_point = None
checkout.shipping_method = None
update_checkout_info_delivery_method(checkout_info, None)
delete_external_shipping_id(checkout=checkout)
checkout.save(
update_fields=[
"shipping_method",
"collection_point",
"private_metadata",
"last_change",
]
)
def is_fully_paid(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
):
"""Check if provided payment methods cover the checkout's total amount.
Note that these payments may not be captured or charged at all.
"""
checkout = checkout_info.checkout
payments = [payment for payment in checkout.payments.all() if payment.is_active]
total_paid = sum([p.total for p in payments])
address = checkout_info.shipping_address or checkout_info.billing_address
checkout_total = (
calculations.checkout_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
)
- checkout.get_total_gift_cards_balance()
)
checkout_total = max(
checkout_total, zero_taxed_money(checkout_total.currency)
).gross
return total_paid >= checkout_total.amount
def cancel_active_payments(checkout: Checkout):
checkout.payments.filter(is_active=True).update(is_active=False)
def is_shipping_required(lines: Iterable["CheckoutLineInfo"]):
"""Check if shipping is required for given checkout lines."""
return any(
line_info.product.product_type.is_shipping_required for line_info in lines
)
def validate_variants_in_checkout_lines(lines: Iterable["CheckoutLineInfo"]):
variants_listings_map = {line.variant.id: line.channel_listing for line in lines}
not_available_variants = [
variant_id
for variant_id, channel_listing in variants_listings_map.items()
if channel_listing is None or channel_listing.price is None
]
if not_available_variants:
not_available_variants_ids = {
graphene.Node.to_global_id("ProductVariant", pk)
for pk in not_available_variants
}
error_code = CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL
raise ValidationError(
{
"lines": ValidationError(
"Cannot add lines with unavailable variants.",
code=error_code, # type: ignore
params={"variants": not_available_variants_ids},
)
}
)
def set_external_shipping_id(checkout: Checkout, app_shipping_id: str):
checkout.store_value_in_private_metadata(
{PRIVATE_META_APP_SHIPPING_ID: app_shipping_id}
)
def get_external_shipping_id(container: Union["Checkout", "Order"]):
return container.get_value_from_private_metadata(PRIVATE_META_APP_SHIPPING_ID)
def delete_external_shipping_id(checkout: Checkout):
checkout.delete_value_from_private_metadata(PRIVATE_META_APP_SHIPPING_ID)
|
document.getElementById('go').onclick = function() {
var html = atob(ENCODED_HTML_TO_REPLACE).replace(
/MONACO_BASE_URL/g,
document.getElementById('monaco-base-url').value
);
console.log(html);
window.document.write(html);
};
// Note: HTML taken from https://github.com/Microsoft/monaco-editor-samples/blob/master/browser-script-editor/index.html,
// and substituting in "../node_modules/monaco-editor/" with the generic "MONACO_BASE_URL" string,
// which in turn is substituted later.
// Also made a few tiny adjustments to the title and container style,
// and added the IntelliSense section, following bug https://github.com/OfficeDev/script-lab/issues/514
// The original text, run through a base64 encoder, is below.
var ENCODED_HTML_TO_REPLACE =
'PCFET0NUWVBFIGh0bWw+CjxodG1sPgogIDxoZWFkPgogICAgPG1ldGEgaHR0cC1lcXVpdj0iWC1VQS1Db21wYXRpYmxlIiBjb250ZW50PSJJRT1lZGdlIiAvPgogICAgPG1ldGEgaHR0cC1lcXVpdj0iQ29udGVudC1UeXBlIiBjb250ZW50PSJ0ZXh0L2h0bWw7Y2hhcnNldD11dGYtOCIgLz4KICAgIDxsaW5rCiAgICAgIHJlbD0ic3R5bGVzaGVldCIKICAgICAgZGF0YS1uYW1lPSJ2cy9lZGl0b3IvZWRpdG9yLm1haW4iCiAgICAgIGhyZWY9Ik1PTkFDT19CQVNFX1VSTC9lZGl0b3IvZWRpdG9yLm1haW4uY3NzIgogICAgLz4KICA8L2hlYWQ+CiAgPGJvZHk+CiAgICA8aDI+TW9uYWNvIEVkaXRvciBTeW5jIExvYWRpbmcgU2FtcGxlLCBNT05BQ09fQkFTRV9VUkw8L2gyPgogICAgPGRpdiBpZD0iY29udGFpbmVyIiBzdHlsZT0id2lkdGg6MTAwJTtoZWlnaHQ6NjAwcHg7Ym9yZGVyOjFweCBzb2xpZCBncmV5Ij48L2Rpdj4KCiAgICA8c2NyaXB0PgogICAgICB2YXIgcmVxdWlyZSA9IHsgcGF0aHM6IHsgdnM6ICdNT05BQ09fQkFTRV9VUkwnIH0gfTsKICAgIDwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ik1PTkFDT19CQVNFX1VSTC9sb2FkZXIuanMiPjwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ik1PTkFDT19CQVNFX1VSTC9lZGl0b3IvZWRpdG9yLm1haW4ubmxzLmpzIj48L3NjcmlwdD4KICAgIDxzY3JpcHQgc3JjPSJNT05BQ09fQkFTRV9VUkwvZWRpdG9yL2VkaXRvci5tYWluLmpzIj48L3NjcmlwdD4KCiAgICA8c2NyaXB0PgogICAgICB2YXIgZWRpdG9yID0gbW9uYWNvLmVkaXRvci5jcmVhdGUoZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQoJ2NvbnRhaW5lcicpLCB7CiAgICAgICAgdmFsdWU6IFsnZnVuY3Rpb24geCgpIHsnLCAnXHRjb25zb2xlLmxvZygiSGVsbG8gd29ybGQhIik7JywgJ30nXS5qb2luKCdcbicpLAogICAgICAgIGxhbmd1YWdlOiAndHlwZXNjcmlwdCcsCiAgICAgIH0pOwogICAgICBpZiAod2luZG93LmZldGNoKSB7CiAgICAgICAgdmFyIHVybCA9ICdodHRwczovL3VucGtnLmNvbS9AbWljcm9zb2Z0L29mZmljZS1qcy1oZWxwZXJzQDAuNy40L2Rpc3Qvb2ZmaWNlLmhlbHBlcnMuZC50cyc7CiAgICAgICAgZmV0Y2godXJsKQogICAgICAgICAgLnRoZW4oZnVuY3Rpb24ocmVzcG9uc2UpIHsKICAgICAgICAgICAgcmV0dXJuIHJlc3BvbnNlLnRleHQoKS50aGVuKGZ1bmN0aW9uKHRleHQpIHsKICAgICAgICAgICAgICBjb25zb2xlLmxvZygiQWRkZWQgSW50ZWxsaVNlbnNlIGZvciAiICsgdXJsKTsKICAgICAgICAgICAgICBtb25hY28ubGFuZ3VhZ2VzLnR5cGVzY3JpcHQudHlwZXNjcmlwdERlZmF1bHRzLmFkZEV4dHJhTGliKHRleHQsIHVybCk7CiAgICAgICAgICAgIH0pOwogICAgICAgICAgfSkKICAgICAgICAgIC5jYXRjaChmdW5jdGlvbihlKSB7CiAgICAgICAgICAgIGNvbnNvbGUuZXJyb3IoZSk7CiAgICAgICAgICB9KTsKICAgICAgfQogICAgPC9zY3JpcHQ+CiAgPC9ib2R5Pgo8L2h0bWw+';
/*
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<link
rel="stylesheet"
data-name="vs/editor/editor.main"
href="MONACO_BASE_URL/editor/editor.main.css"
/>
</head>
<body>
<h2>Monaco Editor Sync Loading Sample, MONACO_BASE_URL</h2>
<div id="container" style="width:100%;height:600px;border:1px solid grey"></div>
<script>
var require = { paths: { vs: 'MONACO_BASE_URL' } };
</script>
<script src="MONACO_BASE_URL/loader.js"></script>
<script src="MONACO_BASE_URL/editor/editor.main.nls.js"></script>
<script src="MONACO_BASE_URL/editor/editor.main.js"></script>
<script>
var editor = monaco.editor.create(document.getElementById('container'), {
value: ['function x() {', '\tconsole.log("Hello world!");', '}'].join('\n'),
language: 'typescript',
});
if (window.fetch) {
var url = 'https://unpkg.com/@microsoft/[email protected]/dist/office.helpers.d.ts';
fetch(url)
.then(function(response) {
return response.text().then(function(text) {
console.log("Added IntelliSense for " + url);
monaco.languages.typescript.typescriptDefaults.addExtraLib(text, url);
});
})
.catch(function(e) {
console.error(e);
});
}
</script>
</body>
</html>
*/
|
# Copyright 2013 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Implementation of frequency (count) matrices, position-weight matrices,
and position-specific scoring matrices.
"""
import math
import platform
from Bio._py3k import range
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
class GenericPositionMatrix(dict):
def __init__(self, alphabet, values):
self.length = None
for letter in alphabet.letters:
if self.length is None:
self.length = len(values[letter])
elif self.length != len(values[letter]):
raise Exception("data has inconsistent lengths")
self[letter] = list(values[letter])
self.alphabet = alphabet
self._letters = sorted(self.alphabet.letters)
def __str__(self):
words = ["%6d" % i for i in range(self.length)]
line = " " + " ".join(words)
lines = [line]
for letter in self._letters:
words = ["%6.2f" % value for value in self[letter]]
line = "%c: " % letter + " ".join(words)
lines.append(line)
text = "\n".join(lines) + "\n"
return text
def __getitem__(self, key):
if isinstance(key, tuple):
if len(key) == 2:
key1, key2 = key
if isinstance(key1, slice):
start1, stop1, stride1 = key1.indices(len(self._letters))
indices1 = range(start1, stop1, stride1)
letters1 = [self._letters[i] for i in indices1]
dim1 = 2
elif isinstance(key1, int):
letter1 = self._letters[key1]
dim1 = 1
elif isinstance(key1, tuple):
letters1 = [self._letters[i] for i in key1]
dim1 = 2
elif isinstance(key1, str):
if len(key1) == 1:
letter1 = key1
dim1 = 1
else:
raise KeyError(key1)
else:
raise KeyError("Cannot understand key %s", str(key1))
if isinstance(key2, slice):
start2, stop2, stride2 = key2.indices(self.length)
indices2 = range(start2, stop2, stride2)
dim2 = 2
elif isinstance(key2, int):
index2 = key2
dim2 = 1
else:
raise KeyError("Cannot understand key %s", str(key2))
if dim1 == 1 and dim2 == 1:
return dict.__getitem__(self, letter1)[index2]
elif dim1 == 1 and dim2 == 2:
values = dict.__getitem__(self, letter1)
return tuple(values[index2] for index2 in indices2)
elif dim1 == 2 and dim2 == 1:
d = {}
for letter1 in letters1:
d[letter1] = dict.__getitem__(self, letter1)[index2]
return d
else:
d = {}
for letter1 in letters1:
values = dict.__getitem__(self, letter1)
d[letter1] = [values[index2] for index2 in indices2]
if sorted(letters1) == self._letters:
return self.__class__(self.alphabet, d)
else:
return d
elif len(key) == 1:
key = key[0]
else:
raise KeyError("keys should be 1- or 2-dimensional")
if isinstance(key, slice):
start, stop, stride = key.indices(len(self._letters))
indices = range(start, stop, stride)
letters = [self._letters[i] for i in indices]
dim = 2
elif isinstance(key, int):
letter = self._letters[key]
dim = 1
elif isinstance(key, tuple):
letters = [self._letters[i] for i in key]
dim = 2
elif isinstance(key, str):
if len(key) == 1:
letter = key
dim = 1
else:
raise KeyError(key)
else:
raise KeyError("Cannot understand key %s", str(key))
if dim == 1:
return dict.__getitem__(self, letter)
elif dim == 2:
d = {}
for letter in letters:
d[letter] = dict.__getitem__(self, letter)
return d
else:
raise RuntimeError("Should not get here")
@property
def consensus(self):
"""Returns the consensus sequence."""
sequence = ""
for i in range(self.length):
try:
maximum = float("-inf")
except ValueError:
# On Python 2.5 or older that was handled in C code,
# and failed on Windows XP 32bit
maximum = - 1E400
for letter in self.alphabet.letters:
count = self[letter][i]
if count > maximum:
maximum = count
sequence_letter = letter
sequence += sequence_letter
return Seq(sequence, self.alphabet)
@property
def anticonsensus(self):
sequence = ""
for i in range(self.length):
try:
minimum = float("inf")
except ValueError:
# On Python 2.5 or older that was handled in C code,
# and failed on Windows XP 32bit
minimum = 1E400
for letter in self.alphabet.letters:
count = self[letter][i]
if count < minimum:
minimum = count
sequence_letter = letter
sequence += sequence_letter
return Seq(sequence, self.alphabet)
@property
def degenerate_consensus(self):
# Following the rules adapted from
# D. R. Cavener: "Comparison of the consensus sequence flanking
# translational start sites in Drosophila and vertebrates."
# Nucleic Acids Research 15(4): 1353-1361. (1987).
# The same rules are used by TRANSFAC.
degenerate_nucleotide = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'AC': 'M',
'AG': 'R',
'AT': 'W',
'CG': 'S',
'CT': 'Y',
'GT': 'K',
'ACG': 'V',
'ACT': 'H',
'AGT': 'D',
'CGT': 'B',
'ACGT': 'N',
}
sequence = ""
for i in range(self.length):
def get(nucleotide):
return self[nucleotide][i]
nucleotides = sorted(self, key=get, reverse=True)
counts = [self[c][i] for c in nucleotides]
# Follow the Cavener rules:
if counts[0] >= sum(counts[1:]) and counts[0] >= 2 * counts[1]:
key = nucleotides[0]
elif 4 * sum(counts[:2]) > 3 * sum(counts):
key = "".join(sorted(nucleotides[:2]))
elif counts[3] == 0:
key = "".join(sorted(nucleotides[:3]))
else:
key = "ACGT"
nucleotide = degenerate_nucleotide[key]
sequence += nucleotide
return Seq(sequence, alphabet=IUPAC.ambiguous_dna)
@property
def gc_content(self):
"""Compute the fraction GC content."""
alphabet = self.alphabet
gc_total = 0.0
total = 0.0
for i in range(self.length):
for letter in alphabet.letters:
if letter in 'CG':
gc_total += self[letter][i]
total += self[letter][i]
return gc_total / total
def reverse_complement(self):
values = {}
values["A"] = self["T"][::-1]
values["T"] = self["A"][::-1]
values["G"] = self["C"][::-1]
values["C"] = self["G"][::-1]
alphabet = self.alphabet
return self.__class__(alphabet, values)
class FrequencyPositionMatrix(GenericPositionMatrix):
def normalize(self, pseudocounts=None):
"""Create and return a position-weight matrix by normalizing the counts matrix.
If pseudocounts is None (default), no pseudocounts are added
to the counts.
If pseudocounts is a number, it is added to the counts before
calculating the position-weight matrix.
Alternatively, the pseudocounts can be a dictionary with a key
for each letter in the alphabet associated with the motif.
"""
counts = {}
if pseudocounts is None:
for letter in self.alphabet.letters:
counts[letter] = [0.0] * self.length
elif isinstance(pseudocounts, dict):
for letter in self.alphabet.letters:
counts[letter] = [float(pseudocounts[letter])] * self.length
else:
for letter in self.alphabet.letters:
counts[letter] = [float(pseudocounts)] * self.length
for i in range(self.length):
for letter in self.alphabet.letters:
counts[letter][i] += self[letter][i]
# Actual normalization is done in the PositionWeightMatrix initializer
return PositionWeightMatrix(self.alphabet, counts)
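# A small worked sketch of normalize() (the numbers are illustrative): with
# counts A=3, C=1, G=0, T=0 at one position and pseudocounts=1, the adjusted
# counts become 4, 2, 1, 1, so the position-weight matrix column is
# 0.50, 0.25, 0.125, 0.125 (the values sum to 1).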
class PositionWeightMatrix(GenericPositionMatrix):
def __init__(self, alphabet, counts):
GenericPositionMatrix.__init__(self, alphabet, counts)
for i in range(self.length):
total = sum(float(self[letter][i]) for letter in alphabet.letters)
for letter in alphabet.letters:
self[letter][i] /= total
for letter in alphabet.letters:
self[letter] = tuple(self[letter])
def log_odds(self, background=None):
"""Returns the Position-Specific Scoring Matrix.
The Position-Specific Scoring Matrix (PSSM) contains the log-odds
scores computed from the probability matrix and the background
probabilities. If the background is None, a uniform background
distribution is assumed.
"""
values = {}
alphabet = self.alphabet
if background is None:
background = dict.fromkeys(self._letters, 1.0)
else:
background = dict(background)
total = sum(background.values())
for letter in alphabet.letters:
background[letter] /= total
values[letter] = []
for i in range(self.length):
for letter in alphabet.letters:
b = background[letter]
if b > 0:
p = self[letter][i]
if p > 0:
logodds = math.log(p / b, 2)
else:
# TODO - Ensure this has unittest coverage!
try:
logodds = float("-inf")
except ValueError:
# On Python 2.5 or older that was handled in C code,
# and failed on Windows XP 32bit
logodds = - 1E400
else:
p = self[letter][i]
if p > 0:
logodds = float("inf")
else:
logodds = float("nan")
values[letter].append(logodds)
pssm = PositionSpecificScoringMatrix(alphabet, values)
return pssm
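# A small worked sketch of log_odds(): with a uniform background (b = 0.25) and
# a column probability p = 0.5, the PSSM entry is log2(0.5 / 0.25) = 1.0;
# p = 0 yields -inf, while a zero-probability background letter yields +inf
# (or nan when p is also 0), as handled above.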
class PositionSpecificScoringMatrix(GenericPositionMatrix):
# Make sure that we use C-accelerated PWM calculations if running under CPython.
# Fall back to the slower Python implementation if Jython or IronPython.
try:
from . import _pwm
def _calculate(self, sequence, m, n):
logodds = [[self[letter][i] for letter in "ACGT"] for i in range(m)]
return self._pwm.calculate(sequence, logodds)
except ImportError:
if platform.python_implementation() == 'CPython':
raise
else:
def _calculate(self, sequence, m, n):
# The C code handles mixed case so Python version must too:
sequence = sequence.upper()
scores = []
for i in range(n - m + 1):
score = 0.0
for position in range(m):
letter = sequence[i + position]
try:
score += self[letter][position]
except KeyError:
score = float("nan")
break
scores.append(score)
return scores
def calculate(self, sequence):
"""Returns the PWM score for a given sequence for all positions.
Notes:
- the sequence can only be a DNA sequence
- the search is performed only on one strand
- if the sequence and the motif have the same length, a single
number is returned
- otherwise, the result is a one-dimensional list or numpy array
"""
# TODO - Code itself tolerates ambiguous bases (as NaN).
if not isinstance(self.alphabet, IUPAC.IUPACUnambiguousDNA):
raise ValueError("PSSM has wrong alphabet: %s - Use only with DNA motifs"
% self.alphabet)
if not isinstance(sequence.alphabet, IUPAC.IUPACUnambiguousDNA):
raise ValueError("Sequence has wrong alphabet: %r - Use only with DNA sequences"
% sequence.alphabet)
# TODO - Force uppercase here and optimise switch statement in C
# by assuming upper case?
sequence = str(sequence)
m = self.length
n = len(sequence)
scores = self._calculate(sequence, m, n)
if len(scores) == 1:
return scores[0]
else:
return scores
def search(self, sequence, threshold=0.0, both=True):
"""Find hits with PWM score above given threshold.
A generator function, returning found hits in the given sequence
with the pwm score higher than the threshold.
"""
sequence = sequence.upper()
n = len(sequence)
m = self.length
if both:
rc = self.reverse_complement()
for position in range(0, n - m + 1):
s = sequence[position:position + m]
score = self.calculate(s)
if score > threshold:
yield (position, score)
if both:
score = rc.calculate(s)
if score > threshold:
yield (position - n, score)
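# A minimal usage sketch of calculate()/search() (the motif `m` and the
# sequence are hypothetical):
#
#   pwm = m.counts.normalize(pseudocounts=0.5)
#   pssm = pwm.log_odds()
#   scores = pssm.calculate(Seq("ACGTACGT", IUPAC.unambiguous_dna))
#   hits = list(pssm.search(Seq("ACGTACGT", IUPAC.unambiguous_dna), threshold=3.0))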
@property
def max(self):
"""Maximal possible score for this motif.
Returns the score computed for the consensus sequence.
"""
score = 0.0
letters = self._letters
for position in range(0, self.length):
score += max(self[letter][position] for letter in letters)
return score
@property
def min(self):
"""Minimal possible score for this motif.
Returns the score computed for the anticonsensus sequence.
"""
score = 0.0
letters = self._letters
for position in range(0, self.length):
score += min(self[letter][position] for letter in letters)
return score
@property
def gc_content(self):
raise Exception("Cannot compute the %GC composition of a PSSM")
def mean(self, background=None):
"""Expected value of the score of a motif."""
if background is None:
background = dict.fromkeys(self._letters, 1.0)
else:
background = dict(background)
total = sum(background.values())
for letter in self._letters:
background[letter] /= total
sx = 0.0
for i in range(self.length):
for letter in self._letters:
logodds = self[letter, i]
if math.isnan(logodds):
continue
if math.isinf(logodds) and logodds < 0:
continue
b = background[letter]
p = b * math.pow(2, logodds)
sx += p * logodds
return sx
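# mean() above evaluates E[score] = sum over positions i and letters a of
# p_a(i) * log2(p_a(i) / b_a), where p_a(i) = b_a * 2**logodds and b_a is the
# background probability; nan and -inf entries are skipped.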
def std(self, background=None):
"""Standard deviation of the score of a motif."""
if background is None:
background = dict.fromkeys(self._letters, 1.0)
else:
background = dict(background)
total = sum(background.values())
for letter in self._letters:
background[letter] /= total
variance = 0.0
for i in range(self.length):
sx = 0.0
sxx = 0.0
for letter in self._letters:
logodds = self[letter, i]
if math.isnan(logodds):
continue
if math.isinf(logodds) and logodds < 0:
continue
b = background[letter]
p = b * math.pow(2, logodds)
sx += p * logodds
sxx += p * logodds * logodds
sxx -= sx * sx
variance += sxx
variance = max(variance, 0) # to avoid roundoff problems
return math.sqrt(variance)
def dist_pearson(self, other):
"""Return the similarity score based on pearson correlation for the given motif against self.
We use the Pearson's correlation of the respective probabilities.
"""
if self.alphabet != other.alphabet:
raise ValueError("Cannot compare motifs with different alphabets")
max_p = -2
for offset in range(-self.length + 1, other.length):
if offset < 0:
p = self.dist_pearson_at(other, -offset)
else: # offset>=0
p = other.dist_pearson_at(self, offset)
if max_p < p:
max_p = p
max_o = -offset
return 1 - max_p, max_o
def dist_pearson_at(self, other, offset):
letters = self._letters
sx = 0.0 # \sum x
sy = 0.0 # \sum y
sxx = 0.0 # \sum x^2
sxy = 0.0 # \sum x \cdot y
syy = 0.0 # \sum y^2
norm = max(self.length, offset + other.length) * len(letters)
for pos in range(min(self.length - offset, other.length)):
xi = [self[letter, pos + offset] for letter in letters]
yi = [other[letter, pos] for letter in letters]
sx += sum(xi)
sy += sum(yi)
sxx += sum(x * x for x in xi)
sxy += sum(x * y for x, y in zip(xi, yi))
syy += sum(y * y for y in yi)
sx /= norm
sy /= norm
sxx /= norm
sxy /= norm
syy /= norm
numerator = sxy - sx * sy
denominator = math.sqrt((sxx - sx * sx) * (syy - sy * sy))
return numerator / denominator
def distribution(self, background=None, precision=10 ** 3):
"""calculate the distribution of the scores at the given precision."""
from .thresholds import ScoreDistribution
if background is None:
background = dict.fromkeys(self._letters, 1.0)
else:
background = dict(background)
total = sum(background.values())
for letter in self._letters:
background[letter] /= total
return ScoreDistribution(precision=precision, pssm=self, background=background)
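# Illustrative usage sketch (not part of the original module). It assumes this
# PSSM was obtained through the Bio.motifs API, e.g. as
# motif.counts.normalize(pseudocounts=0.5).log_odds(); the threshold below is
# an arbitrary example value.
#
#     for position, score in pssm.search("ACGTACGTACGT", threshold=3.0):
#         # negative positions are hits of the reverse complement (minus strand)
#         print(position, score)
#
#     print(pssm.max, pssm.min)        # best and worst possible scores
#     print(pssm.mean(), pssm.std())   # expected score and spread under a flat background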
|
import asyncio
import logging
import struct
from ipaddress import IPv4Address
from .node import Node, NodeHeap
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
class SpiderCrawl:
"""
Crawl the network and look for given 160-bit keys.
"""
def __init__(self, protocol, node, peers, ksize, alpha):
"""
        Create a new SpiderCrawl.
Args:
protocol: A :class:`~magnet2torrent.dht.protocol.KRPCProtocol` instance.
node: A :class:`~magnet2torrent.dht.node.Node` representing the key we're
looking for
peers: A list of :class:`~magnet2torrent.dht.node.Node` instances that
provide the entry point for the network
ksize: The value for k based on the paper
alpha: The value for alpha based on the paper
"""
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = NodeHeap(self.node, self.ksize)
self.last_ids_crawled = []
self.cancel_crawl = False
self.crawl_finished = False
log.info("creating spider with peers: %s", peers)
self.nearest.push(peers)
async def _find(self, rpcmethod):
log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
tasks = set()
task_mapping = {}
while not self.cancel_crawl and (
not self.nearest.have_contacted_all() or tasks
):
count = self.alpha - len(tasks)
for peer in self.nearest.get_uncontacted()[:count]:
self.nearest.mark_contacted(peer)
task = asyncio.ensure_future(rpcmethod(peer, self.node))
task_mapping[task] = peer.id
tasks.add(task)
done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
done_mapping = {}
for task in done:
done_mapping[task_mapping.pop(task)] = task.result()
await self._nodes_found(done_mapping)
self.crawl_finished = True
for task in tasks:
task.cancel()
return await self._return_value()
async def _return_value(self):
raise NotImplementedError
async def _nodes_found(self, responses):
raise NotImplementedError
class PeerSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha, queue):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
self._queue = queue
async def find(self):
"""
Find either the closest nodes or the value requested.
"""
return await self._find(self.protocol.call_get_peers)
async def _nodes_found(self, responses):
"""
Handle the result of an iteration in _find.
"""
if self.cancel_crawl:
return
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.has_value():
await self._queue.put(response.get_values())
else:
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
async def _return_value(self):
await self._queue.put([])
return
class NodeSpiderCrawl(SpiderCrawl):
async def find(self):
"""
Find the closest nodes.
"""
return await self._find(self.protocol.call_find_node)
async def _nodes_found(self, responses):
"""
Handle the result of an iteration in _find.
"""
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
else:
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
async def _return_value(self):
return list(self.nearest)
class RPCFindResponse:
def __init__(self, response):
"""
A wrapper for the result of a RPC find.
"""
self.response = response
def happened(self):
"""
Did the other host actually respond?
"""
return self.response[0]
def has_value(self):
return b"values" in self.response[1]
def get_values(self):
peers = []
for value in self.response[1].get(b"values", []):
peer_ip, peer_port = struct.unpack("!IH", value)
peers.append((IPv4Address(peer_ip), peer_port))
return peers
def get_node_list(self):
"""
Get the node list in the response. If there's no value, this should
be set.
"""
response = self.response[1].get(b"nodes")
nodelist = []
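        # Compact node info: each entry is 26 bytes -- a 20-byte node ID
        # followed by a 4-byte IPv4 address and a 2-byte port, both big-endian.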
while response and len(response) >= 26:
peer_id = response[:20]
peer_ip, peer_port = struct.unpack("!IH", response[20:26])
node = Node(peer_id, str(IPv4Address(peer_ip)), peer_port)
nodelist.append(node)
response = response[26:]
return nodelist
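# Illustrative usage sketch (not part of the original module). It assumes an
# already-running KRPCProtocol instance, a target Node and a list of bootstrap
# peers; ksize=8 and alpha=3 are the usual Kademlia parameters, chosen here
# only as an example.
#
#     crawler = NodeSpiderCrawl(protocol, target_node, bootstrap_peers, ksize=8, alpha=3)
#     closest = await crawler.find()   # list of the nearest Nodes discovered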
|
import pytest
from logic4e import *
from utils4e import expr_handle_infix_ops, count, Symbol
definite_clauses_KB = PropDefiniteKB()
for clause in ['(B & F)==>E', '(A & E & F)==>G', '(B & C)==>F', '(A & B)==>D', '(E & F)==>H', '(H & I)==>J', 'A', 'B', 'C']:
definite_clauses_KB.tell(expr(clause))
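# Forward chaining over these clauses, starting from the facts A, B and C,
# derives F (from B & C), E (from B & F), D (from A & B), G (from A & E & F)
# and H (from E & F); I is never derived, so J (which needs H & I) is not
# entailed. test_pl_fc_entails below checks exactly this against
# definite_clauses_KB.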
def test_is_symbol():
assert is_symbol('x')
assert is_symbol('X')
assert is_symbol('N245')
assert not is_symbol('')
assert not is_symbol('1L')
assert not is_symbol([1, 2, 3])
def test_is_var_symbol():
assert is_var_symbol('xt')
assert not is_var_symbol('Txt')
assert not is_var_symbol('')
assert not is_var_symbol('52')
def test_is_prop_symbol():
assert not is_prop_symbol('xt')
assert is_prop_symbol('Txt')
assert not is_prop_symbol('')
assert not is_prop_symbol('52')
def test_variables():
assert variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, 2)')) == {x, y, z}
assert variables(expr('(x ==> y) & B(x, y) & A')) == {x, y}
def test_expr():
assert repr(expr('P <=> Q(1)')) == '(P <=> Q(1))'
assert repr(expr('P & Q | ~R(x, F(x))')) == '((P & Q) | ~R(x, F(x)))'
assert (expr_handle_infix_ops('P & Q ==> R & ~S')
== "P & Q |'==>'| R & ~S")
def test_extend():
assert extend({x: 1}, y, 2) == {x: 1, y: 2}
def test_subst():
assert subst({x: 42, y:0}, F(x) + y) == (F(42) + 0)
def test_PropKB():
kb = PropKB()
    assert count(kb.ask(query) for query in [A, C, D, E, Q]) == 0
kb.tell(A & E)
assert kb.ask(A) == kb.ask(E) == {}
kb.tell(E |'==>'| C)
assert kb.ask(C) == {}
kb.retract(E)
assert kb.ask(E) is False
assert kb.ask(C) is False
def test_wumpus_kb():
# Statement: There is no pit in [1,1].
assert wumpus_kb.ask(~P11) == {}
# Statement: There is no pit in [1,2].
assert wumpus_kb.ask(~P12) == {}
# Statement: There is a pit in [2,2].
assert wumpus_kb.ask(P22) is False
# Statement: There is a pit in [3,1].
assert wumpus_kb.ask(P31) is False
# Statement: Neither [1,2] nor [2,1] contains a pit.
assert wumpus_kb.ask(~P12 & ~P21) == {}
# Statement: There is a pit in either [2,2] or [3,1].
assert wumpus_kb.ask(P22 | P31) == {}
def test_is_definite_clause():
assert is_definite_clause(expr('A & B & C & D ==> E'))
assert is_definite_clause(expr('Farmer(Mac)'))
assert not is_definite_clause(expr('~Farmer(Mac)'))
assert is_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)'))
assert not is_definite_clause(expr('(Farmer(f) & ~Rabbit(r)) ==> Hates(f, r)'))
assert not is_definite_clause(expr('(Farmer(f) | Rabbit(r)) ==> Hates(f, r)'))
def test_parse_definite_clause():
assert parse_definite_clause(expr('A & B & C & D ==> E')) == ([A, B, C, D], E)
assert parse_definite_clause(expr('Farmer(Mac)')) == ([], expr('Farmer(Mac)'))
assert parse_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)')) == ([expr('Farmer(f)'), expr('Rabbit(r)')], expr('Hates(f, r)'))
def test_pl_true():
assert pl_true(P, {}) is None
assert pl_true(P, {P: False}) is False
assert pl_true(P | Q, {P: True}) is True
assert pl_true((A | B) & (C | D), {A: False, B: True, D: True}) is True
assert pl_true((A & B) & (C | D), {A: False, B: True, D: True}) is False
assert pl_true((A & B) | (A & C), {A: False, B: True, C: True}) is False
assert pl_true((A | B) & (C | D), {A: True, D: False}) is None
assert pl_true(P | P, {}) is None
def test_tt_true():
assert tt_true(P | ~P)
assert tt_true('~~P <=> P')
assert not tt_true((P | ~Q) & (~P | Q))
assert not tt_true(P & ~P)
assert not tt_true(P & Q)
assert tt_true((P | ~Q) | (~P | Q))
assert tt_true('(A & B) ==> (A | B)')
assert tt_true('((A & B) & C) <=> (A & (B & C))')
assert tt_true('((A | B) | C) <=> (A | (B | C))')
assert tt_true('(A ==> B) <=> (~B ==> ~A)')
assert tt_true('(A ==> B) <=> (~A | B)')
assert tt_true('(A <=> B) <=> ((A ==> B) & (B ==> A))')
assert tt_true('~(A & B) <=> (~A | ~B)')
assert tt_true('~(A | B) <=> (~A & ~B)')
assert tt_true('(A & (B | C)) <=> ((A & B) | (A & C))')
assert tt_true('(A | (B & C)) <=> ((A | B) & (A | C))')
def test_dpll():
assert (dpll_satisfiable(A & ~B & C & (A | ~D) & (~E | ~D) & (C | ~D) & (~A | ~F) & (E | ~F)
& (~D | ~F) & (B | ~C | D) & (A | ~E | F) & (~A | E | D))
== {B: False, C: True, A: True, F: False, D: True, E: False})
assert dpll_satisfiable(A & B & ~C & D) == {C: False, A: True, D: True, B: True}
    assert dpll_satisfiable((A | (B & C)) |'<=>'| ((A | B) & (A | C))) in ({C: True, A: True}, {C: True, B: True})
assert dpll_satisfiable(A |'<=>'| B) == {A: True, B: True}
assert dpll_satisfiable(A & ~B) == {A: True, B: False}
assert dpll_satisfiable(P & ~P) is False
def test_find_pure_symbol():
assert find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A]) == (A, True)
assert find_pure_symbol([A, B, C], [~A|~B,~B|~C,C|A]) == (B, False)
assert find_pure_symbol([A, B, C], [~A|B,~B|~C,C|A]) == (None, None)
def test_unit_clause_assign():
assert unit_clause_assign(A|B|C, {A:True}) == (None, None)
assert unit_clause_assign(B|C, {A:True}) == (None, None)
assert unit_clause_assign(B|~A, {A:True}) == (B, True)
def test_find_unit_clause():
assert find_unit_clause([A|B|C, B|~C, ~A|~B], {A:True}) == (B, False)
def test_unify():
assert unify(x, x, {}) == {}
assert unify(x, 3, {}) == {x: 3}
assert unify(x & 4 & y, 6 & y & 4, {}) == {x: 6, y: 4}
assert unify(expr('A(x)'), expr('A(B)')) == {x: B}
assert unify(expr('American(x) & Weapon(B)'), expr('American(A) & Weapon(y)')) == {x: A, y: B}
def test_pl_fc_entails():
assert pl_fc_entails(horn_clauses_KB, expr('Q'))
assert pl_fc_entails(definite_clauses_KB, expr('G'))
assert pl_fc_entails(definite_clauses_KB, expr('H'))
assert not pl_fc_entails(definite_clauses_KB, expr('I'))
assert not pl_fc_entails(definite_clauses_KB, expr('J'))
assert not pl_fc_entails(horn_clauses_KB, expr('SomethingSilly'))
def test_tt_entails():
assert tt_entails(P & Q, Q)
assert not tt_entails(P | Q, Q)
assert tt_entails(A & (B | C) & E & F & ~(P | Q), A & E & F & ~P & ~Q)
assert not tt_entails(P |'<=>'| Q, Q)
assert tt_entails((P |'==>'| Q) & P, Q)
assert not tt_entails((P |'<=>'| Q) & ~P, Q)
def test_prop_symbols():
assert prop_symbols(expr('x & y & z | A')) == {A}
assert prop_symbols(expr('(x & B(z)) ==> Farmer(y) | A')) == {A, expr('Farmer(y)'), expr('B(z)')}
def test_constant_symbols():
assert constant_symbols(expr('x & y & z | A')) == {A}
assert constant_symbols(expr('(x & B(z)) & Father(John) ==> Farmer(y) | A')) == {A, expr('John')}
def test_predicate_symbols():
assert predicate_symbols(expr('x & y & z | A')) == set()
assert predicate_symbols(expr('(x & B(z)) & Father(John) ==> Farmer(y) | A')) == {
('B', 1),
('Father', 1),
('Farmer', 1)}
assert predicate_symbols(expr('(x & B(x, y, z)) & F(G(x, y), x) ==> P(Q(R(x, y)), x, y, z)')) == {
('B', 3),
('F', 2),
('G', 2),
('P', 4),
('Q', 1),
('R', 2)}
def test_eliminate_implications():
assert repr(eliminate_implications('A ==> (~B <== C)')) == '((~B | ~C) | ~A)'
assert repr(eliminate_implications(A ^ B)) == '((A & ~B) | (~A & B))'
assert repr(eliminate_implications(A & B | C & ~D)) == '((A & B) | (C & ~D))'
def test_dissociate():
assert dissociate('&', [A & B]) == [A, B]
assert dissociate('|', [A, B, C & D, P | Q]) == [A, B, C & D, P, Q]
assert dissociate('&', [A, B, C & D, P | Q]) == [A, B, C, D, P | Q]
def test_associate():
assert (repr(associate('&', [(A & B), (B | C), (B & C)]))
== '(A & B & (B | C) & B & C)')
assert (repr(associate('|', [A | (B | (C | (A & B)))]))
== '(A | B | C | (A & B))')
def test_move_not_inwards():
assert repr(move_not_inwards(~(A | B))) == '(~A & ~B)'
assert repr(move_not_inwards(~(A & B))) == '(~A | ~B)'
assert repr(move_not_inwards(~(~(A | ~B) | ~~C))) == '((A | ~B) & ~C)'
def test_distribute_and_over_or():
    def test_entailment(s, has_and=False):
result = distribute_and_over_or(s)
if has_and:
assert result.op == '&'
assert tt_entails(s, result)
assert tt_entails(result, s)
test_entailment((A & B) | C, True)
test_entailment((A | B) & C, True)
test_entailment((A | B) | C, False)
test_entailment((A & B) | (C | D), True)
def test_to_cnf():
assert (repr(to_cnf(wumpus_world_inference & ~expr('~P12'))) ==
"((~P12 | B11) & (~P21 | B11) & (P12 | P21 | ~B11) & ~B11 & P12)")
assert repr(to_cnf((P & Q) | (~P & ~Q))) == '((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))'
assert repr(to_cnf('A <=> B')) == '((A | ~B) & (B | ~A))'
assert repr(to_cnf("B <=> (P1 | P2)")) == '((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))'
assert repr(to_cnf('A <=> (B & C)')) == '((A | ~B | ~C) & (B | ~A) & (C | ~A))'
assert repr(to_cnf("a | (b & c) | d")) == '((b | a | d) & (c | a | d))'
assert repr(to_cnf("A & (B | (D & E))")) == '(A & (D | B) & (E | B))'
assert repr(to_cnf("A | (B | (C | (D & E)))")) == '((D | A | B | C) & (E | A | B | C))'
assert repr(to_cnf('(A <=> ~B) ==> (C | ~D)')) == '((B | ~A | C | ~D) & (A | ~A | C | ~D) & (B | ~B | C | ~D) & (A | ~B | C | ~D))'
def test_pl_resolution():
assert pl_resolution(wumpus_kb, ~P11)
assert pl_resolution(wumpus_kb, ~B11)
assert not pl_resolution(wumpus_kb, P22)
assert pl_resolution(horn_clauses_KB, A)
assert pl_resolution(horn_clauses_KB, B)
assert not pl_resolution(horn_clauses_KB, P)
assert not pl_resolution(definite_clauses_KB, P)
def test_standardize_variables():
e = expr('F(a, b, c) & G(c, A, 23)')
assert len(variables(standardize_variables(e))) == 3
# assert variables(e).intersection(variables(standardize_variables(e))) == {}
assert is_variable(standardize_variables(expr('x')))
def test_fol_bc_ask():
def test_ask(query, kb=None):
q = expr(query)
test_variables = variables(q)
answers = fol_bc_ask(kb or test_kb, q)
return sorted(
[dict((x, v) for x, v in list(a.items()) if x in test_variables)
for a in answers], key=repr)
assert repr(test_ask('Farmer(x)')) == '[{x: Mac}]'
assert repr(test_ask('Human(x)')) == '[{x: Mac}, {x: MrsMac}]'
assert repr(test_ask('Rabbit(x)')) == '[{x: MrsRabbit}, {x: Pete}]'
assert repr(test_ask('Criminal(x)', crime_kb)) == '[{x: West}]'
def test_fol_fc_ask():
def test_ask(query, kb=None):
q = expr(query)
test_variables = variables(q)
answers = fol_fc_ask(kb or test_kb, q)
return sorted(
[dict((x, v) for x, v in list(a.items()) if x in test_variables)
for a in answers], key=repr)
assert repr(test_ask('Criminal(x)', crime_kb)) == '[{x: West}]'
assert repr(test_ask('Enemy(x, America)', crime_kb)) == '[{x: Nono}]'
assert repr(test_ask('Farmer(x)')) == '[{x: Mac}]'
assert repr(test_ask('Human(x)')) == '[{x: Mac}, {x: MrsMac}]'
assert repr(test_ask('Rabbit(x)')) == '[{x: MrsRabbit}, {x: Pete}]'
def test_d():
assert d(x * x - x, x) == 2 * x - 1
def test_WalkSAT():
def check_SAT(clauses, single_solution={}):
# Make sure the solution is correct if it is returned by WalkSat
# Sometimes WalkSat may run out of flips before finding a solution
soln = WalkSAT(clauses)
if soln:
assert all(pl_true(x, soln) for x in clauses)
if single_solution: # Cross check the solution if only one exists
assert all(pl_true(x, single_solution) for x in clauses)
assert soln == single_solution
# Test WalkSat for problems with solution
check_SAT([A & B, A & C])
check_SAT([A | B, P & Q, P & B])
check_SAT([A & B, C | D, ~(D | P)], {A: True, B: True, C: True, D: False, P: False})
check_SAT([A, B, ~C, D], {C: False, A: True, B: True, D: True})
# Test WalkSat for problems without solution
assert WalkSAT([A & ~A], 0.5, 100) is None
assert WalkSAT([A & B, C | D, ~(D | B)], 0.5, 100) is None
assert WalkSAT([A | B, ~A, ~(B | C), C | D, P | Q], 0.5, 100) is None
assert WalkSAT([A | B, B & C, C | D, D & A, P, ~P], 0.5, 100) is None
def test_SAT_plan():
transition = {'A': {'Left': 'A', 'Right': 'B'},
'B': {'Left': 'A', 'Right': 'C'},
'C': {'Left': 'B', 'Right': 'C'}}
assert SAT_plan('A', transition, 'C', 2) is None
assert SAT_plan('A', transition, 'B', 3) == ['Right']
assert SAT_plan('C', transition, 'A', 3) == ['Left', 'Left']
transition = {(0, 0): {'Right': (0, 1), 'Down': (1, 0)},
(0, 1): {'Left': (1, 0), 'Down': (1, 1)},
(1, 0): {'Right': (1, 0), 'Up': (1, 0), 'Left': (1, 0), 'Down': (1, 0)},
(1, 1): {'Left': (1, 0), 'Up': (0, 1)}}
assert SAT_plan((0, 0), transition, (1, 1), 4) == ['Right', 'Down']
if __name__ == '__main__':
pytest.main()
|
# -*- coding: utf-8 -*-
# @Time : 2018/7/28 下午3:52
# @Author : yidxue
|
#pragma once
#include <chrono>
namespace psched {
struct TaskStats {
using TimePoint = std::chrono::steady_clock::time_point;
TimePoint arrival_time; // time point when the task is marked as 'ready' (queued)
TimePoint start_time; // time point when the task is about to execute (dequeued)
TimePoint end_time; // time point when the task completes execution
  // Waiting time is the amount of time a task spends in the ready
  // queue before it gets the CPU.
template <typename T = std::chrono::milliseconds> long long waiting_time() const {
return std::chrono::duration_cast<T>(start_time - arrival_time).count();
}
  // Burst time is the amount of time a task requires to execute on the CPU.
  // It is also called execution time or running time.
template <typename T = std::chrono::milliseconds> long long burst_time() const {
return std::chrono::duration_cast<T>(end_time - start_time).count();
}
  // Turnaround time (TAT) is the interval from the submission of a task to
  // its completion. It can also be viewed as the sum of the time spent
  // waiting in the ready queue, executing on the CPU, and performing
  // input/output; here it equals:
  //
  // waiting_time() + burst_time()
template <typename T = std::chrono::milliseconds> long long turnaround_time() const {
return std::chrono::duration_cast<T>(end_time - arrival_time).count();
}
};
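// Illustrative example (not from the original header): if a task is queued at
// t0, starts executing at t0 + 5 ms and finishes at t0 + 12 ms, then
// waiting_time() == 5, burst_time() == 7 and turnaround_time() == 12, i.e.
// turnaround_time() == waiting_time() + burst_time().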
} // namespace psched |
class ConfigRetrieval {
constructor() {}
get includeExtension() {
return atom.config.get('autocomplete-modules.includeExtension');
}
get vendors() {
return atom.config.get('autocomplete-modules.vendors');
}
get webpack() {
return atom.config.get('autocomplete-modules.webpack');
}
get webpackConfigFilename() {
return atom.config.get('autocomplete-modules.webpackConfigFilename');
}
get babelPluginModuleResolver() {
return atom.config.get('autocomplete-modules.babelPluginModuleResolver');
}
}
module.exports = {
retrieval: new ConfigRetrieval(),
registrar: {
includeExtension: {
order: 1,
title: 'Include file extension',
description: "Include the file's extension when filling in the completion.",
type: 'boolean',
default: false
},
vendors: {
order: 2,
title: 'Vendor directories',
description: 'A list of directories to search for modules relative to the project root.',
type: 'array',
default: ['node_modules'],
items: {
type: 'string'
}
},
webpack: {
order: 3,
title: 'Webpack support',
      description: 'Attempts to use the module resolution settings from the given webpack configuration file to search for modules.',
type: 'boolean',
default: false
},
webpackConfigFilename: {
order: 4,
title: 'Webpack configuration filename',
description: 'When "Webpack support" is enabled this is the config file used to supply module search paths.',
type: 'string',
default: 'webpack.config.js'
},
babelPluginModuleResolver: {
order: 5,
title: 'Babel Plugin Module Resolver support',
description: 'Use the <a href="https://github.com/tleunen/babel-plugin-module-resolver">Babel Plugin Module Resolver</a> configuration located in your `.babelrc` or in the babel configuration in `package.json`.',
type: 'boolean',
default: false
}
}
};
|
/*
* arch/arm/plat-omap/include/mach/serial.h
*
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support- Santosh Shilimkar <[email protected]>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ASM_ARCH_SERIAL_H
#define __ASM_ARCH_SERIAL_H
#include <linux/init.h>
/*
* Memory entry used for the DEBUG_LL UART configuration, relative to
* start of RAM. See also uncompress.h and debug-macro.S.
*
* Note that using a memory location for storing the UART configuration
* has at least two limitations:
*
* 1. Kernel uncompress code cannot overlap OMAP_UART_INFO as the
* uncompress code could then partially overwrite itself
* 2. We assume printascii is called at least once before paging_init,
* and addruart has a chance to read OMAP_UART_INFO
*/
#define OMAP_UART_INFO_OFS 0x3ffc
/* OMAP1 serial ports */
#define OMAP1_UART1_BASE 0xfffb0000
#define OMAP1_UART2_BASE 0xfffb0800
#define OMAP1_UART3_BASE 0xfffb9800
/* OMAP2 serial ports */
#define OMAP2_UART1_BASE 0x4806a000
#define OMAP2_UART2_BASE 0x4806c000
#define OMAP2_UART3_BASE 0x4806e000
/* OMAP3 serial ports */
#define OMAP3_UART1_BASE OMAP2_UART1_BASE
#define OMAP3_UART2_BASE OMAP2_UART2_BASE
#define OMAP3_UART3_BASE 0x49020000
#define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */
#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */
/* OMAP4 serial ports */
#define OMAP4_UART1_BASE OMAP2_UART1_BASE
#define OMAP4_UART2_BASE OMAP2_UART2_BASE
#define OMAP4_UART3_BASE 0x48020000
#define OMAP4_UART4_BASE 0x4806e000
/* TI81XX serial ports */
#define TI81XX_UART1_BASE 0x48020000
#define TI81XX_UART2_BASE 0x48022000
#define TI81XX_UART3_BASE 0x48024000
/* AM3505/3517 UART4 */
#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
/* AM33XX serial port */
#define AM33XX_UART1_BASE 0x44E09000
/* OMAP5 serial ports */
#define OMAP5_UART1_BASE OMAP2_UART1_BASE
#define OMAP5_UART2_BASE OMAP2_UART2_BASE
#define OMAP5_UART3_BASE OMAP4_UART3_BASE
#define OMAP5_UART4_BASE OMAP4_UART4_BASE
#define OMAP5_UART5_BASE 0x48066000
#define OMAP5_UART6_BASE 0x48068000
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
#define ZOOM_UART_VIRT 0xfa400000
#define OMAP_PORT_SHIFT 2
#define OMAP7XX_PORT_SHIFT 0
#define ZOOM_PORT_SHIFT 1
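/*
 * Base baud = UART functional clock / 16, the standard 8250/16550
 * oversampling divisor: a 12 MHz clock on OMAP1510 and 48 MHz on
 * OMAP16xx/24xx.
 */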
#define OMAP1510_BASE_BAUD (12000000/16)
#define OMAP16XX_BASE_BAUD (48000000/16)
#define OMAP24XX_BASE_BAUD (48000000/16)
/*
* DEBUG_LL port encoding stored into the UART1 scratchpad register by
* decomp_setup in uncompress.h
*/
#define OMAP1UART1 11
#define OMAP1UART2 12
#define OMAP1UART3 13
#define OMAP2UART1 21
#define OMAP2UART2 22
#define OMAP2UART3 23
#define OMAP3UART1 OMAP2UART1
#define OMAP3UART2 OMAP2UART2
#define OMAP3UART3 33
#define OMAP3UART4 34 /* Only on 36xx */
#define OMAP4UART1 OMAP2UART1
#define OMAP4UART2 OMAP2UART2
#define OMAP4UART3 43
#define OMAP4UART4 44
#define TI81XXUART1 81
#define TI81XXUART2 82
#define TI81XXUART3 83
#define AM33XXUART1 84
#define OMAP5UART3 OMAP4UART3
#define OMAP5UART4 OMAP4UART4
#define ZOOM_UART 95 /* Only on zoom2/3 */
/* This is only used by 8250.c for omap1510 */
#define is_omap_port(pt) ({int __ret = 0; \
if ((pt)->port.mapbase == OMAP1_UART1_BASE || \
(pt)->port.mapbase == OMAP1_UART2_BASE || \
(pt)->port.mapbase == OMAP1_UART3_BASE) \
__ret = 1; \
__ret; \
})
#ifndef __ASSEMBLER__
struct omap_board_data;
struct omap_uart_port_info;
extern void omap_serial_init(void);
extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
extern void omap_serial_init_port(struct omap_board_data *bdata,
struct omap_uart_port_info *platform_data);
#endif
#endif
|
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, September 27, 2020 at 11:41:21 AM Mountain Standard Time
* Operating System: Version 14.0 (Build 18A373)
* Image Source: /System/Library/PrivateFrameworks/UIAccessibility.framework/UIAccessibility
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
typedef struct _NSZone* NSZoneRef;
typedef struct objc_method* objc_methodRef;
typedef struct CGPath* CGPathRef;
typedef struct CGColor* CGColorRef;
typedef struct CGPoint {
double x;
double y;
} CGPoint;
typedef struct CGSize {
double width;
double height;
} CGSize;
typedef struct CGRect {
CGPoint origin;
CGSize size;
} CGRect;
typedef struct NSRange {
unsigned long long location;
unsigned long long length;
} NSRange;
|
/*
* This header is generated by classdump-dyld 1.5
* on Tuesday, November 10, 2020 at 10:12:27 PM Mountain Standard Time
* Operating System: Version 14.2 (Build 18K57)
* Image Source: /System/Library/PrivateFrameworks/AccountsDaemon.framework/AccountsDaemon
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos. Updated by Kevin Bradley.
*/
#import <Accounts/ACAccountStore.h>
#import <libobjc.A.dylib/ACRemoteAccountStoreProtocol.h>
@protocol ACDAccountStoreDelegate;
@class NSMutableArray, ACDDatabaseConnection, ACDClientAuthorizationManager, ACDFakeRemoteAccountStoreSession, ACDClient, ACRemoteDeviceProxy, ACDAuthenticationPluginManager, ACDAccessPluginManager, ACDDataclassOwnersManager, ACDAuthenticationDialogManager, ACDAccountNotifier, ACDDatabaseBackupActivity, NSString;
@interface ACDAccountStore : ACAccountStore <ACRemoteAccountStoreProtocol> {
NSMutableArray* _accountChanges;
ACDDatabaseConnection* _databaseConnection;
ACDClientAuthorizationManager* _authorizationManager;
ACDFakeRemoteAccountStoreSession* _fakeRemoteAccountStoreSession;
BOOL _notificationsEnabled;
BOOL _migrationInProgress;
id<ACDAccountStoreDelegate> _delegate;
ACDClient* _client;
ACRemoteDeviceProxy* _remoteDeviceProxy;
ACDAuthenticationPluginManager* _authenticationPluginManager;
ACDAccessPluginManager* _accessPluginManager;
ACDDataclassOwnersManager* _dataclassOwnersManager;
ACDAuthenticationDialogManager* _authenticationDialogManager;
ACDAccountNotifier* _accountNotifier;
ACDDatabaseBackupActivity* _databaseBackupActivity;
}
@property (nonatomic,readonly) ACDDatabaseConnection * databaseConnection; //@synthesize databaseConnection=_databaseConnection - In the implementation block
@property (nonatomic,retain) ACDAuthenticationPluginManager * authenticationPluginManager; //@synthesize authenticationPluginManager=_authenticationPluginManager - In the implementation block
@property (nonatomic,retain) ACDAccessPluginManager * accessPluginManager; //@synthesize accessPluginManager=_accessPluginManager - In the implementation block
@property (nonatomic,retain) ACDDataclassOwnersManager * dataclassOwnersManager; //@synthesize dataclassOwnersManager=_dataclassOwnersManager - In the implementation block
@property (nonatomic,retain) ACDAuthenticationDialogManager * authenticationDialogManager; //@synthesize authenticationDialogManager=_authenticationDialogManager - In the implementation block
@property (nonatomic,retain) ACRemoteDeviceProxy * remoteDeviceProxy; //@synthesize remoteDeviceProxy=_remoteDeviceProxy - In the implementation block
@property (nonatomic,retain) ACDAccountNotifier * accountNotifier; //@synthesize accountNotifier=_accountNotifier - In the implementation block
@property (nonatomic,retain) ACDDatabaseBackupActivity * databaseBackupActivity; //@synthesize databaseBackupActivity=_databaseBackupActivity - In the implementation block
@property (assign,nonatomic) BOOL notificationsEnabled; //@synthesize notificationsEnabled=_notificationsEnabled - In the implementation block
@property (assign,getter=isMigrationInProgress,nonatomic) BOOL migrationInProgress; //@synthesize migrationInProgress=_migrationInProgress - In the implementation block
@property (assign,nonatomic,__weak) id<ACDAccountStoreDelegate> delegate; //@synthesize delegate=_delegate - In the implementation block
@property (assign,nonatomic,__weak) ACDClient * client; //@synthesize client=_client - In the implementation block
@property (nonatomic,readonly) ACDClientAuthorizationManager * authorizationManager; //@synthesize authorizationManager=_authorizationManager - In the implementation block
@property (readonly) unsigned long long hash;
@property (readonly) Class superclass;
@property (copy,readonly) NSString * description;
@property (copy,readonly) NSString * debugDescription;
-(id)init;
-(id<ACDAccountStoreDelegate>)delegate;
-(void)setDelegate:(id<ACDAccountStoreDelegate>)arg1 ;
-(ACDClient *)client;
-(void)kerberosAccountsForDomainFromURL:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)setClient:(ACDClient *)arg1 ;
-(id)remoteAccountStoreSession;
-(void)registerMonitorForAccountsOfTypes:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)setNotificationsEnabled:(BOOL)arg1 ;
-(id)longLivedRemoteAccountStoreSession;
-(void)accountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)accountTypeWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)accountsWithHandler:(/*^block*/id)arg1 ;
-(void)accountsWithAccountType:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)accountsWithAccountType:(id)arg1 options:(unsigned long long)arg2 completion:(/*^block*/id)arg3 ;
-(void)accountsWithAccountTypeIdentifiers:(id)arg1 preloadedProperties:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)accountsOnPairedDeviceWithAccountType:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)dataclassesWithHandler:(/*^block*/id)arg1 ;
-(void)accountTypesWithHandler:(/*^block*/id)arg1 ;
-(void)visibleTopLevelAccountsWithAccountTypeIdentifiers:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)accountExistsWithDescription:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)isPushSupportedForAccount:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)insertAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)removeAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)removeAccount:(id)arg1 withDataclassActions:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)canSaveAccount:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)saveAccount:(id)arg1 verify:(BOOL)arg2 dataclassActions:(id)arg3 completion:(/*^block*/id)arg4 ;
-(void)updateExistenceCacheOfAccountWithTypeIdentifier:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)requestAccessForAccountTypeWithIdentifier:(id)arg1 options:(id)arg2 withHandler:(/*^block*/id)arg3 ;
-(void)accessKeysForAccountType:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)appPermissionsForAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)setPermissionGranted:(id)arg1 forBundleID:(id)arg2 onAccountType:(id)arg3 withHandler:(/*^block*/id)arg4 ;
-(void)clearAllPermissionsGrantedForAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)permissionForAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)grantedPermissionsForAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)clearGrantedPermissionsForAccountType:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)verifyCredentialsForAccount:(id)arg1 options:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)renewCredentialsForAccount:(id)arg1 options:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)migrateCredentialForAccount:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)credentialForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)credentialForAccount:(id)arg1 serviceID:(id)arg2 handler:(/*^block*/id)arg3 ;
-(void)setCredential:(id)arg1 forAccount:(id)arg2 serviceID:(id)arg3 completion:(/*^block*/id)arg4 ;
-(void)credentialItemsWithCompletion:(/*^block*/id)arg1 ;
-(void)credentialItemForAccount:(id)arg1 serviceName:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)insertCredentialItem:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)saveCredentialItem:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)removeCredentialItem:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)parentAccountForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)childAccountsForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)childAccountsWithAccountTypeIdentifier:(id)arg1 parentAccountIdentifier:(id)arg2 handler:(/*^block*/id)arg3 ;
-(void)enabledDataclassesForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)provisionedDataclassesForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)supportedDataclassesForAccountType:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)syncableDataclassesForAccountType:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)displayAccountTypeForAccountWithIdentifier:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)accountIdentifiersEnabledForDataclass:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)accountIdentifiersEnabledToSyncDataclass:(id)arg1 handler:(/*^block*/id)arg2 ;
-(void)preloadDataclassOwnersWithCompletion:(/*^block*/id)arg1 ;
-(void)dataclassActionsForAccountSave:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)dataclassActionsForAccountDeletion:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)isPerformingDataclassActionsForAccount:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)isTetheredSyncingEnabledForDataclass:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)tetheredSyncSourceTypeForDataclass:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)accountIdentifiersEnabledForDataclasses:(id)arg1 withAccountTypeIdentifiers:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)clientTokenForAccountIdentifier:(id)arg1 completion:(/*^block*/id)arg2 ;
-(void)addClientToken:(id)arg1 forAccountIdentifier:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)discoverPropertiesForAccount:(id)arg1 options:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)openAuthenticationURL:(id)arg1 forAccount:(id)arg2 shouldConfirm:(BOOL)arg3 completion:(/*^block*/id)arg4 ;
-(void)openAuthenticationURLForAccount:(id)arg1 withDelegateClassName:(id)arg2 fromBundleAtPath:(id)arg3 shouldConfirm:(BOOL)arg4 completion:(/*^block*/id)arg5 ;
-(void)notifyRemoteDevicesOfModifiedAccount:(id)arg1 withChangeType:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)saveAccount:(id)arg1 toPairedDeviceWithOptions:(id)arg2 completion:(/*^block*/id)arg3 ;
-(void)removeAccountsFromPairedDeviceWithCompletion:(/*^block*/id)arg1 ;
-(void)triggerKeychainMigrationIfNecessary:(/*^block*/id)arg1 ;
-(void)typeIdentifierForDomain:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)handleURL:(id)arg1 ;
-(void)scheduleBackupIfNonexistent:(/*^block*/id)arg1 ;
-(void)resetDatabaseToVersion:(id)arg1 withCompletion:(/*^block*/id)arg2 ;
-(void)shutdownAccountsD:(/*^block*/id)arg1 ;
-(void)setClientBundleID:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(BOOL)notificationsEnabled;
-(void)saveAccount:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(void)notifyRemoteDevicesOfModifiedAccount:(id)arg1 withChangeType:(id)arg2 ;
-(id)_accountTypeWithIdentifier:(id)arg1 ;
-(id)initWithClient:(id)arg1 databaseConnection:(id)arg2 ;
-(ACDAccountNotifier *)accountNotifier;
-(id)_childAccountsForAccountWithID:(id)arg1 ;
-(BOOL)isMigrationInProgress;
-(BOOL)_performDataclassActions:(id)arg1 forAccount:(id)arg2 error:(id*)arg3 ;
-(id)_clientTokenQueue;
-(void)_removeClientTokenForAccountIdentifer:(id)arg1 ;
-(void)_delegate_accountStoreDidSaveAccount:(id)arg1 changeType:(int)arg2 ;
-(id)_accountWithIdentifier:(id)arg1 prefetchKeypaths:(id)arg2 ;
-(ACDDataclassOwnersManager *)dataclassOwnersManager;
-(id)_addAccountNoSave:(id)arg1 withDataclassActions:(id)arg2 error:(id*)arg3 ;
-(void)_setAccountManagedObjectRelationships:(id)arg1 withAccount:(id)arg2 oldAccount:(id)arg3 error:(id*)arg4 ;
-(id)_commitOrRollbackDataclassActions:(id)arg1 forAccount:(id)arg2 originalEnabledDataclasses:(id)arg3 ;
-(BOOL)_updateAccountNoSave:(id)arg1 withDataclassActions:(id)arg2 error:(id*)arg3 ;
-(id)_accountWithIdentifier:(id)arg1 ;
-(void)_deleteAccountNoSave:(id)arg1 withDataclassActions:(id)arg2 error:(id*)arg3 ;
-(id)_displayAccountForAccount:(id)arg1 ;
-(id)_dataclassWithName:(id)arg1 createIfNecessary:(BOOL)arg2 ;
-(void)_updateExistenceCacheOfAccountWithTypeIdentifier:(id)arg1 withHandler:(/*^block*/id)arg2 ;
-(BOOL)_saveWithError:(id*)arg1 ;
-(id)_allAccounts_sync;
-(id)_legacyCredentialForAccount:(id)arg1 client:(id)arg2 error:(id*)arg3 ;
-(id)_credentialItemWithAccountIdentifier:(id)arg1 serviceName:(id)arg2 ;
-(id)_accountsWithAccountType:(id)arg1 options:(unsigned long long)arg2 error:(id*)arg3 ;
-(id)_predicateForFetchingAccountsWithManagedAccountTypeID:(id)arg1 options:(unsigned long long)arg2 ;
-(BOOL)_canManagedAccountType:(id)arg1 syncManagedDataclass:(id)arg2 ;
-(BOOL)_isManagedAccount:(id)arg1 enabledForManagedDataclass:(id)arg2 ;
-(BOOL)accountsExistWithAccountTypeIdentifier:(id)arg1 ;
-(BOOL)_removeAccountNoSave:(id)arg1 withDataclassActions:(id)arg2 withError:(id*)arg3 ;
-(BOOL)shouldPreventAccountCreationWithObsoleteAccountType;
-(BOOL)_canSaveAccount:(id)arg1 error:(id*)arg2 ;
-(void)_completeSave:(id)arg1 dataclassActions:(id)arg2 completion:(/*^block*/id)arg3 ;
-(id)_lockForAccountType:(id)arg1 ;
-(BOOL)_handleAccountAdd:(id)arg1 withDataclassActions:(id)arg2 error:(id*)arg3 ;
-(BOOL)_handleAccountMod:(id)arg1 withDataclassActions:(id)arg2 withError:(id*)arg3 ;
-(id)accountTypeWithIdentifier:(id)arg1 ;
-(void)_requestAccessForAccountTypeWithIdentifier:(id)arg1 options:(id)arg2 allowUserInteraction:(BOOL)arg3 withHandler:(/*^block*/id)arg4 ;
-(ACDClientAuthorizationManager *)authorizationManager;
-(id)_clientTokenForAccountIdentifier:(id)arg1 error:(id)arg2 ;
-(ACDAuthenticationDialogManager *)authenticationDialogManager;
-(ACDDatabaseBackupActivity *)databaseBackupActivity;
-(id)accountsWithAccountTypeIdentifier:(id)arg1 ;
-(void)addAccountNoSave:(id)arg1 error:(id*)arg2 ;
-(void)updateAccountNoSave:(id)arg1 error:(id*)arg2 ;
-(void)deleteAccountNoSave:(id)arg1 error:(id*)arg2 ;
-(id)masterCredentialForAccountIdentifier:(id)arg1 ;
-(id)_accountsWithAcountType:(id)arg1 error:(id*)arg2 ;
-(void)saveAccount:(id)arg1 pid:(id)arg2 verify:(BOOL)arg3 dataclassActions:(id)arg4 completion:(/*^block*/id)arg5 ;
-(ACRemoteDeviceProxy *)remoteDeviceProxy;
-(void)setRemoteDeviceProxy:(ACRemoteDeviceProxy *)arg1 ;
-(ACDDatabaseConnection *)databaseConnection;
-(ACDAuthenticationPluginManager *)authenticationPluginManager;
-(void)setAuthenticationPluginManager:(ACDAuthenticationPluginManager *)arg1 ;
-(ACDAccessPluginManager *)accessPluginManager;
-(void)setAccessPluginManager:(ACDAccessPluginManager *)arg1 ;
-(void)setDataclassOwnersManager:(ACDDataclassOwnersManager *)arg1 ;
-(void)setAuthenticationDialogManager:(ACDAuthenticationDialogManager *)arg1 ;
-(void)setAccountNotifier:(ACDAccountNotifier *)arg1 ;
-(void)setDatabaseBackupActivity:(ACDDatabaseBackupActivity *)arg1 ;
-(void)setMigrationInProgress:(BOOL)arg1 ;
@end
|
var assert = require("assert");
var Kraken = require("../kraken");
var BellmanFord = require("../../src/algorithms/bellman-ford");
suite("algorithms", function() {
suite("bellman-ford", function() {
test("basics", function() {
var graph = Kraken()
.add("a")
.add("b")
.add("c")
.add("d")
.add("e")
.connect("a", "b")
.connect("a", "c")
.connect("b", "a")
.connect("b", "c")
.connect("b", "d")
.connect("c", "d")
.connect("d", "a")
.connect("d", "e");
var a = graph.get("a");
var b = graph.get("b");
var c = graph.get("c");
var d = graph.get("d");
var e = graph.get("e");
var shortest = BellmanFord(graph, a);
assert.equal(shortest.distance(a, a), 0);
assert.equal(shortest.distance(a, b), 1);
assert.equal(shortest.distance(a, c), 1);
assert.equal(shortest.distance(a, d), 2);
assert.equal(shortest.distance(a, e), 3);
assert.equal(shortest.previous(a, a), undefined);
assert.equal(shortest.previous(a, b), a);
assert.equal(shortest.previous(a, c), a);
assert.equal(shortest.previous(a, d), b);
assert.equal(shortest.previous(a, e), d);
});
// test("throws error on negative cycle", function() {
// });
});
});
|
from clikit.io import ConsoleIO as BaseConsoleIO
from .io_mixin import IOMixin
class ConsoleIO(IOMixin, BaseConsoleIO):
"""
A wrapper around CliKit's ConsoleIO.
"""
def __init__(self, *args, **kwargs):
super(ConsoleIO, self).__init__(*args, **kwargs)
|
import buildLine from './buildLine';
import { SHAPES } from '../../../const';
import { hex2rgb } from '../../../utils';
/**
* Builds a circle to draw
*
* Ignored from docs since it is not directly exposed.
*
* @ignore
* @private
* @param {PIXI.WebGLGraphicsData} graphicsData - The graphics object to draw
* @param {object} webGLData - an object containing all the webGL-specific information to create this shape
*/
export default function buildCircle(graphicsData, webGLData)
{
    // need to convert the shape parameters into regular vertex data
const circleData = graphicsData.shape;
const x = circleData.x;
const y = circleData.y;
let width;
let height;
// TODO - bit hacky??
if (graphicsData.type === SHAPES.CIRC)
{
width = circleData.radius;
height = circleData.radius;
}
else
{
width = circleData.width;
height = circleData.height;
}
if (width === 0 || height === 0)
{
return;
}
const totalSegs = Math.floor(30 * Math.sqrt(circleData.radius))
|| Math.floor(15 * Math.sqrt(circleData.width + circleData.height));
const seg = (Math.PI * 2) / totalSegs;
if (graphicsData.fill)
{
const color = hex2rgb(graphicsData.fillColor);
const alpha = graphicsData.fillAlpha;
const r = color[0] * alpha;
const g = color[1] * alpha;
const b = color[2] * alpha;
const verts = webGLData.points;
const indices = webGLData.indices;
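        // Each vertex occupies 6 floats (x, y, r, g, b, alpha), so dividing the
        // length of the points array by 6 gives the index of the next vertex.
        // The extra index pushed before the loop and the duplicated one pushed
        // after it create degenerate triangles, letting several shapes share a
        // single triangle strip when batched.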
let vecPos = verts.length / 6;
indices.push(vecPos);
for (let i = 0; i < totalSegs + 1; i++)
{
verts.push(x, y, r, g, b, alpha);
verts.push(
x + (Math.sin(seg * i) * width),
y + (Math.cos(seg * i) * height),
r, g, b, alpha
);
indices.push(vecPos++, vecPos++);
}
indices.push(vecPos - 1);
}
if (graphicsData.lineWidth)
{
const tempPoints = graphicsData.points;
graphicsData.points = [];
for (let i = 0; i < totalSegs + 1; i++)
{
graphicsData.points.push(
x + (Math.sin(seg * i) * width),
y + (Math.cos(seg * i) * height)
);
}
buildLine(graphicsData, webGLData);
graphicsData.points = tempPoints;
}
}
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_gazebo"
PROJECT_SPACE_DIR = "/home/orwah/catkin_ws/install"
PROJECT_VERSION = "1.2.5"
|
import numpy as np
from secml.array import CArray
from secml.ml import CNormalizerDNN
from .alexnet import alexnet
def attach_alexnet(clf):
"""Attach AlexNet (as a preprocessor) to input CClassifier.
The output of `classifier:4` layer is used as input for the classifier.
"""
clf.preprocess = CNormalizerDNN(net=alexnet(), out_layer='classifier:4')
def ds_numpy_to_pytorch(ds):
"""Converts ds samples from numpy flatten to pytorch flatten."""
imgs = ds.X
    # PyTorch networks expect images in the tensor format (C x H x W).
    # Our images have been flattened from the numpy format (H x W x C),
    # so we first need to get back to (n_samples x H x W x C).
imgs = imgs.tondarray().reshape(
(imgs.shape[0], ds.header.img_h, ds.header.img_w, 3))
# Then we move the "C" axis to the correct position,
# and finally ravel the rows again, done.
imgs = np.moveaxis(imgs, 3, 1).reshape(
imgs.shape[0], 3 * ds.header.img_h * ds.header.img_w)
ds.X = CArray(imgs)
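# Worked shape example (illustrative, assuming 224x224 RGB images): X starts as
# (n_samples, 224*224*3) flattened from (H, W, C); it is reshaped to
# (n_samples, 224, 224, 3), moveaxis gives (n_samples, 3, 224, 224), and the
# final reshape flattens each sample back to 3*224*224 = 150528 values in
# channel-first order, which is what the PyTorch network expects.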
|
#pragma once
#include "core/AppItems/mvTypeBases.h"
#include "mvAppItem.h"
namespace Marvel {
class mvStyleWindow : public mvBaseWindowAppitem
{
MV_APPITEM_TYPE(mvAppItemType::StyleWindow, "add_style_window")
public:
mvStyleWindow(const std::string& name)
: mvBaseWindowAppitem(name)
{
m_description.deleteAllowed = false;
}
void draw() override;
};
} |
# -*- encoding: utf-8 -*-
from mmpy_bot.utils import allowed_users
from mmpy_bot.bot import respond_to
import sys
import os
sys.path.append(os.getcwd()) # enable importing driver_settings
from tests.behavior_tests.bots import driver_settings
@respond_to('^allowed_driver$')
@allowed_users(driver_settings.BOT_NAME)
def driver_allowed_hello(message):
message.reply('Driver allowed!')
@respond_to('^not_allowed_driver$')
@allowed_users('somebody-not-driver')
def driver_not_allowed_hello(message):
message.reply('Driver not allowed!')
@respond_to('^allowed_driver_by_email$')
@allowed_users(driver_settings.BOT_LOGIN)
def driver_allowed_hello_by_email(message):
message.reply('Driver email allowed!')
|
import React, { Component } from "react";
import axios from "axios";
import Map from "./maps";
import Devices from "./deviceList";
export default class devicePage extends Component {
state = {
devices: [],
coordinates: [],
page: 0,
token: ""
};
onClickHandler = id => {
console.log(id);
this.setState({
coordinates: [],
page: 0
});
let loop = () => {
axios({
method: "get",
url: `http://localhost:5500/api/device/${id}?page=${this.state.page}`,
headers: {
"authentication": localStorage.getItem("token")
}
})
.then(res => {
if (res.data.length > 0) {
let coordsTemp = [...this.state.coordinates];
res.data.forEach(coord => {
              if (coord.gps && coord.gps[0] != null && coord.gps[1] != null) {
coordsTemp.push({ lat: coord.gps[0], lng: coord.gps[1] })
}
});
            this.setState(
              { coordinates: [...new Set(coordsTemp)], page: this.state.page + 1 },
              () => {
                // setState is asynchronous, so only log and request the next
                // page once the page counter has actually been updated.
                console.log("From Onclick in maps>", this.state.page);
                loop();
              }
            );
}
})
.catch(err => console.log(err));
}
loop();
};
componentDidMount() {
axios({
method: "get",
url: 'http://localhost:5500/api/device',
headers: {
"authentication": localStorage.getItem("token")
}
})
.then(res => {
this.setState({ devices: res.data });
console.log("State: Set");
});
}
render() {
return (
<div style={{ maxWidth: "85vw", margin: "auto" }}>
<div className="row">
<section className="col-3">
<Devices onClick={this.onClickHandler} list={this.state.devices} />
</section>
<section className="col-9">
<Map coords={this.state.coordinates} />
</section>
</div>
</div>
);
}
}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
@pytest.mark.functional
class TestGroupAPI(object):
def test_leave_group(self, app, group, group_member_with_token):
"""Test a request to leave a group through the API."""
group_member, token = group_member_with_token
headers = {'Authorization': str('Bearer {}'.format(token.value))}
app.delete('/api/groups/{}/members/me'.format(group.pubid),
headers=headers)
# We currently have no elegant way to check this via the API, but in a
# future version we should be able to make a GET request here for the
# group information and check it 404s
assert group_member not in group.members
@pytest.fixture
def group(db_session, factories):
group = factories.Group()
db_session.commit()
return group
@pytest.fixture
def group_member(group, db_session, factories):
user = factories.User()
group.members.append(user)
db_session.commit()
return user
@pytest.fixture
def group_member_with_token(group_member, db_session, factories):
token = factories.DeveloperToken(userid=group_member.userid)
db_session.add(token)
db_session.commit()
return (group_member, token)
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from prometheus_client import Counter
from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
event_processing_loop_counter,
event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, UserID
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)
events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")
class ApplicationServicesHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
self.is_mine_id = hs.is_mine_id
self.appservice_api = hs.get_application_service_api()
self.scheduler = hs.get_application_service_scheduler()
self.started_scheduler = False
self.clock = hs.get_clock()
self.notify_appservices = hs.config.notify_appservices
self.event_sources = hs.get_event_sources()
self.current_max = 0
self.is_processing = False
def notify_interested_services(self, max_token: RoomStreamToken):
"""Notifies (pushes) all application services interested in this event.
Pushing is done asynchronously, so this method won't block for any
prolonged length of time.
"""
# We just use the minimum stream ordering and ignore the vector clock
# component. This is safe to do as long as we *always* ignore the vector
# clock components.
current_id = max_token.stream
services = self.store.get_app_services()
if not services or not self.notify_appservices:
return
self.current_max = max(self.current_max, current_id)
if self.is_processing:
return
# We only start a new background process if necessary rather than
# optimistically (to cut down on overhead).
self._notify_interested_services(max_token)
@wrap_as_background_process("notify_interested_services")
async def _notify_interested_services(self, max_token: RoomStreamToken):
with Measure(self.clock, "notify_interested_services"):
self.is_processing = True
try:
limit = 100
while True:
(
upper_bound,
events,
) = await self.store.get_new_events_for_appservice(
self.current_max, limit
)
if not events:
break
events_by_room = {} # type: Dict[str, List[EventBase]]
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
async def handle_event(event):
# Gather interested services
services = await self._get_services_for_event(event)
if len(services) == 0:
return # no services need notifying
# Do we know this user exists? If not, poke the user
# query API for all services which match that user regex.
# This needs to block as these user queries need to be
# made BEFORE pushing the event.
await self._check_user_exists(event.sender)
if event.type == EventTypes.Member:
await self._check_user_exists(event.state_key)
if not self.started_scheduler:
async def start_scheduler():
try:
return await self.scheduler.start()
except Exception:
logger.error("Application Services Failure")
run_as_background_process("as_scheduler", start_scheduler)
self.started_scheduler = True
# Fork off pushes to these services
for service in services:
self.scheduler.submit_event_for_as(service, event)
now = self.clock.time_msec()
ts = await self.store.get_received_ts(event.event_id)
synapse.metrics.event_processing_lag_by_event.labels(
"appservice_sender"
).observe((now - ts) / 1000)
async def handle_room_events(events):
for event in events:
await handle_event(event)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(handle_room_events, evs)
for evs in events_by_room.values()
],
consumeErrors=True,
)
)
await self.store.set_appservice_last_pos(upper_bound)
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)
synapse.metrics.event_processing_positions.labels(
"appservice_sender"
).set(upper_bound)
events_processed_counter.inc(len(events))
event_processing_loop_room_count.labels("appservice_sender").inc(
len(events_by_room)
)
event_processing_loop_counter.labels("appservice_sender").inc()
synapse.metrics.event_processing_lag.labels(
"appservice_sender"
).set(now - ts)
synapse.metrics.event_processing_last_ts.labels(
"appservice_sender"
).set(ts)
finally:
self.is_processing = False
def notify_interested_services_ephemeral(
self,
stream_key: str,
new_token: Optional[int],
users: Collection[Union[str, UserID]] = [],
):
"""This is called by the notifier in the background
when a ephemeral event handled by the homeserver.
This will determine which appservices
are interested in the event, and submit them.
Events will only be pushed to appservices
that have opted into ephemeral events
Args:
stream_key: The stream the event came from.
new_token: The latest stream token
users: The user(s) involved with the event.
"""
if not self.notify_appservices:
return
if stream_key not in ("typing_key", "receipt_key", "presence_key"):
return
services = [
service
for service in self.store.get_app_services()
if service.supports_ephemeral
]
if not services:
return
# We only start a new background process if necessary rather than
# optimistically (to cut down on overhead).
self._notify_interested_services_ephemeral(
services, stream_key, new_token, users
)
@wrap_as_background_process("notify_interested_services_ephemeral")
async def _notify_interested_services_ephemeral(
self,
services: List[ApplicationService],
stream_key: str,
new_token: Optional[int],
users: Collection[Union[str, UserID]],
):
logger.debug("Checking interested services for %s" % (stream_key))
with Measure(self.clock, "notify_interested_services_ephemeral"):
for service in services:
# Only handle typing if we have the latest token
if stream_key == "typing_key" and new_token is not None:
events = await self._handle_typing(service, new_token)
if events:
self.scheduler.submit_ephemeral_events_for_as(service, events)
# We don't persist the token for typing_key for performance reasons
elif stream_key == "receipt_key":
events = await self._handle_receipts(service)
if events:
self.scheduler.submit_ephemeral_events_for_as(service, events)
await self.store.set_type_stream_id_for_appservice(
service, "read_receipt", new_token
)
elif stream_key == "presence_key":
events = await self._handle_presence(service, users)
if events:
self.scheduler.submit_ephemeral_events_for_as(service, events)
await self.store.set_type_stream_id_for_appservice(
service, "presence", new_token
)
async def _handle_typing(
self, service: ApplicationService, new_token: int
) -> List[JsonDict]:
typing_source = self.event_sources.sources["typing"]
# Get the typing events from just before current
typing, _ = await typing_source.get_new_events_as(
service=service,
# For performance reasons, we don't persist the previous
# token in the DB and instead fetch the latest typing information
# for appservices.
from_key=new_token - 1,
)
return typing
async def _handle_receipts(self, service: ApplicationService) -> List[JsonDict]:
from_key = await self.store.get_type_stream_id_for_appservice(
service, "read_receipt"
)
receipts_source = self.event_sources.sources["receipt"]
receipts, _ = await receipts_source.get_new_events_as(
service=service, from_key=from_key
)
return receipts
async def _handle_presence(
self, service: ApplicationService, users: Collection[Union[str, UserID]]
) -> List[JsonDict]:
events = [] # type: List[JsonDict]
presence_source = self.event_sources.sources["presence"]
from_key = await self.store.get_type_stream_id_for_appservice(
service, "presence"
)
for user in users:
if isinstance(user, str):
user = UserID.from_string(user)
interested = await service.is_interested_in_presence(user, self.store)
if not interested:
continue
presence_events, _ = await presence_source.get_new_events(
user=user,
service=service,
from_key=from_key,
)
time_now = self.clock.time_msec()
events.extend(
{
"type": "m.presence",
"sender": event.user_id,
"content": format_user_presence_state(
event, time_now, include_user_id=False
),
}
for event in presence_events
)
return events
async def query_user_exists(self, user_id: str) -> bool:
"""Check if any application service knows this user_id exists.
Args:
user_id: The user to query if they exist on any AS.
Returns:
True if this user exists on at least one application service.
"""
user_query_services = self._get_services_for_user(user_id=user_id)
for user_service in user_query_services:
is_known_user = await self.appservice_api.query_user(user_service, user_id)
if is_known_user:
return True
return False
async def query_room_alias_exists(
self, room_alias: RoomAlias
) -> Optional[RoomAliasMapping]:
"""Check if an application service knows this room alias exists.
Args:
room_alias: The room alias to query.
Returns:
namedtuple: with keys "room_id" and "servers" or None if no
association can be found.
"""
room_alias_str = room_alias.to_string()
services = self.store.get_app_services()
alias_query_services = [
s for s in services if (s.is_interested_in_alias(room_alias_str))
]
for alias_service in alias_query_services:
is_known_alias = await self.appservice_api.query_alias(
alias_service, room_alias_str
)
if is_known_alias:
# the alias exists now so don't query more ASes.
return await self.store.get_association_from_room_alias(room_alias)
return None
async def query_3pe(
self, kind: str, protocol: str, fields: Dict[bytes, List[bytes]]
) -> List[JsonDict]:
services = self._get_services_for_3pn(protocol)
results = await make_deferred_yieldable(
defer.DeferredList(
[
run_in_background(
self.appservice_api.query_3pe, service, kind, protocol, fields
)
for service in services
],
consumeErrors=True,
)
)
ret = []
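        # DeferredList gathers a (success, result) pair per application service
        # (consumeErrors=True swallows individual failures), so a failing AS just
        # yields success=False and is skipped below rather than aborting the lookup.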
for (success, result) in results:
if success:
ret.extend(result)
return ret
async def get_3pe_protocols(
self, only_protocol: Optional[str] = None
) -> Dict[str, JsonDict]:
services = self.store.get_app_services()
protocols = {} # type: Dict[str, List[JsonDict]]
# Collect up all the individual protocol responses out of the ASes
for s in services:
for p in s.protocols:
if only_protocol is not None and p != only_protocol:
continue
if p not in protocols:
protocols[p] = []
info = await self.appservice_api.get_3pe_protocol(s, p)
if info is not None:
protocols[p].append(info)
def _merge_instances(infos: List[JsonDict]) -> JsonDict:
if not infos:
return {}
# Merge the 'instances' lists of multiple results, but just take
# the other fields from the first as they ought to be identical
# copy the result so as not to corrupt the cached one
combined = dict(infos[0])
combined["instances"] = list(combined["instances"])
for info in infos[1:]:
combined["instances"].extend(info["instances"])
return combined
return {p: _merge_instances(protocols[p]) for p in protocols.keys()}
async def _get_services_for_event(
self, event: EventBase
) -> List[ApplicationService]:
"""Retrieve a list of application services interested in this event.
Args:
            event: The event to check.
Returns:
A list of services interested in this event based on the service regex.
"""
services = self.store.get_app_services()
# we can't use a list comprehension here. Since python 3, list
# comprehensions use a generator internally. This means you can't yield
# inside of a list comprehension anymore.
interested_list = []
for s in services:
if await s.is_interested(event, self.store):
interested_list.append(s)
return interested_list
def _get_services_for_user(self, user_id: str) -> List[ApplicationService]:
services = self.store.get_app_services()
return [s for s in services if (s.is_interested_in_user(user_id))]
def _get_services_for_3pn(self, protocol: str) -> List[ApplicationService]:
services = self.store.get_app_services()
return [s for s in services if s.is_interested_in_protocol(protocol)]
async def _is_unknown_user(self, user_id: str) -> bool:
if not self.is_mine_id(user_id):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
return False
user_info = await self.store.get_user_by_id(user_id)
if user_info:
return False
# user not found; could be the AS though, so check.
services = self.store.get_app_services()
service_list = [s for s in services if s.sender == user_id]
return len(service_list) == 0
async def _check_user_exists(self, user_id: str) -> bool:
unknown_user = await self._is_unknown_user(user_id)
if unknown_user:
return await self.query_user_exists(user_id)
return True
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[38],{230:function(a,e,o){"use strict";o.r(e);var r=o(0),t=Object(r.a)({},function(){var a=this,e=a.$createElement,o=a._self._c||e;return o("ContentSlotsDistributor",{attrs:{"slot-key":a.$parent.slotKey}},[o("h1",{attrs:{id:"carregar-modelo"}},[a._v("Carregar Modelo")]),a._v(" "),o("p",[a._v("Carrega um modelo previamente gerado pelo LEMONADE e salvo usando a operação "),o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Salvar modelo")]),a._v(". O modelo carregado pode ser aplicado em um novo conjunto de dados, sendo reutilizado.")],1),a._v(" "),o("h3",{attrs:{id:"conectores"}},[a._v("Conectores")]),a._v(" "),o("table",[o("thead",[o("tr",[o("th",[a._v("Entrada")]),a._v(" "),o("th",[a._v("Saída")])])]),a._v(" "),o("tbody",[o("tr",[o("td",[a._v("Não tem")]),a._v(" "),o("td",[a._v("Modelo carregado")])])])]),a._v(" "),o("h3",{attrs:{id:"tarefa"}},[a._v("Tarefa")]),a._v(" "),o("p",[a._v("Nome da Tarefa")]),a._v(" "),o("h3",{attrs:{id:"aba-execucao"}},[a._v("Aba Execução")]),a._v(" "),o("table",[o("thead",[o("tr",[o("th",[a._v("Parâmetro")]),a._v(" "),o("th",[a._v("Detalhe")])])]),a._v(" "),o("tbody",[o("tr",[o("td",[o("strong",[a._v("Modelo")])]),a._v(" "),o("td",[a._v("Modelo anteriormente salvo a ser carregado")])])])]),a._v(" "),o("p",[o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Aba Aparência")])],1),a._v(" "),o("p",[o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Aba Resultados")])],1),a._v(" "),o("h2",{attrs:{id:"exemplo-de-utilizacao"}},[a._v("Exemplo de Utilização")]),a._v(" "),o("p",[o("strong",[a._v("Objetivo:")]),a._v(" Carregar um modelo salvo. No exemplo apresentado, será utilizada a base de dados íris. O modelo será carregado, aplicado a base íris e avaliado."),o("br"),a._v(" "),o("strong",[a._v("Base de Dados:")]),a._v(" "),o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Íris")])],1),a._v(" "),o("p",[o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image6.png",alt:"Ler dados"}})]),a._v(" "),o("ol",[o("li",[o("p",[a._v("Adicione uma base de dados por meio da operação "),o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Ler dados")]),a._v("."),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image5.png",alt:"Tabela - Ler dados"}})],1)]),a._v(" "),o("li",[o("p",[a._v("Na operação "),o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Converter categórico")]),a._v(" para numérico, selecione "),o("em",[a._v("“class”")]),a._v(" no campo "),o("strong",[a._v("Atributos")]),a._v(". 
Preencha "),o("em",[a._v("“class_label”")]),a._v(" no campo "),o("strong",[a._v("Nome para novos atributos indexados")]),a._v("."),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image4.png",alt:"Converter categórico para numérico"}})],1)]),a._v(" "),o("li",[o("p",[a._v("Adicione a operação "),o("router-link",{attrs:{to:"/pt-br/"}},[a._v("Aplicar modelo")]),a._v(", selecione "),o("em",[a._v("“petal_length”")]),a._v(", "),o("em",[a._v("“petal_width”")]),a._v(", "),o("em",[a._v("“sepal_length”")]),a._v(" e "),o("em",[a._v("“sepal_width”")]),a._v(" no campo "),o("strong",[a._v("Atributos previsores")]),a._v(" e preencha "),o("em",[a._v("“prediction”")]),a._v(" no campo "),o("strong",[a._v("Nome do novo atributo")]),a._v("."),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image3.png",alt:"Aplicar modelo"}})],1)]),a._v(" "),o("li",[o("p",[a._v("Adicione a operação "),o("strong",[a._v("Carregar modelo")]),a._v(" e selecione o modelo que será utilizado no fluxo."),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image7.png",alt:"Tabela - Carregar modelo"}})])]),a._v(" "),o("li",[o("p",[a._v("Adicione a operação [Avaliar Modelo] selecione "),o("em",[a._v("“prediction”")]),a._v(" no campo "),o("strong",[a._v("Atributo usado para predição")]),a._v(", "),o("em",[a._v("“class_label”")]),a._v(" no "),o("strong",[a._v("Atributo usado como label")]),a._v(", "),o("em",[a._v("“Acurácia”")]),a._v(" em "),o("strong",[a._v("Métrica usada para avaliação")]),a._v(".\n"),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image2.png",alt:"Tabela - Avaliar modelo"}})])]),a._v(" "),o("li",[o("p",[a._v("Execute o fluxo e visualize o resultado."),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image1.png",alt:"Gráfico - Resultado"}}),o("br"),a._v(" "),o("img",{attrs:{src:"/docs/img/spark/entrada_e_saida/carregar_modelo/image8.png",alt:"Tabela - Resultado"}})])])]),a._v(" "),o("hr"),a._v(" "),o("p",[a._v("Dúvidas e/ou sugestões envie um e-mail para [email protected]")])])},[],!1,null,null,null);e.default=t.exports}}]); |
import rasterstats as rs
import pandas as pd
vector = "/path/to/some/vector.geojson"
raster = "/sciclone/aiddata10/REU/some/raster.tif"
output = "/path/to/results.csv"
stats = rs.zonal_stats(vector, raster, stats="mean", geojson_out=True)
# to csv
x = [i['properties'] for i in stats]
out = pd.DataFrame(x)
out.to_csv(output, index=False, encoding='utf-8')
# to geojson
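# Minimal sketch of the geojson export hinted at above: wraps the per-feature
# results (geojson_out=True returns GeoJSON-like features) in a FeatureCollection.
# The output path below is an assumption, not part of the original script.
import json
geojson_output = "/path/to/results.geojson"
with open(geojson_output, "w") as f:
    json.dump({"type": "FeatureCollection", "features": stats}, f)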
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from .Model import Model
from numpy.random import RandomState
torch.manual_seed(123)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(123)
class QuatRE(Model):
def __init__(self, config):
super(QuatRE, self).__init__(config)
self.ent_embeddings = nn.Embedding(self.config.entTotal, 4 * self.config.hidden_size) # vectorized quaternion
self.rel_embeddings = nn.Embedding(self.config.relTotal, 4 * self.config.hidden_size)
self.Whr = nn.Embedding(self.config.relTotal, 4 * self.config.hidden_size)
self.Wtr = nn.Embedding(self.config.relTotal, 4 * self.config.hidden_size)
self.criterion = nn.Softplus()
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.ent_embeddings.weight.data)
nn.init.xavier_uniform_(self.rel_embeddings.weight.data)
nn.init.xavier_uniform_(self.Whr.weight.data)
nn.init.xavier_uniform_(self.Wtr.weight.data)
@staticmethod
def normalization(quaternion, split_dim=1): # vectorized quaternion bs x 4dim
size = quaternion.size(split_dim) // 4
quaternion = quaternion.reshape(-1, 4, size) # bs x 4 x dim
quaternion = quaternion / torch.sqrt(torch.sum(quaternion ** 2, 1, True)) # quaternion / norm
quaternion = quaternion.reshape(-1, 4 * size)
return quaternion
@staticmethod
def make_wise_quaternion(quaternion): # for vector * vector quaternion element-wise multiplication
if len(quaternion.size()) == 1:
quaternion = quaternion.unsqueeze(0)
size = quaternion.size(1) // 4
r, i, j, k = torch.split(quaternion, size, dim=1)
r2 = torch.cat([r, -i, -j, -k], dim=1) # 0, 1, 2, 3 --> bs x 4dim
i2 = torch.cat([i, r, -k, j], dim=1) # 1, 0, 3, 2
j2 = torch.cat([j, k, r, -i], dim=1) # 2, 3, 0, 1
k2 = torch.cat([k, -j, i, r], dim=1) # 3, 2, 1, 0
return r2, i2, j2, k2
@staticmethod
def get_quaternion_wise_mul(quaternion):
size = quaternion.size(1) // 4
quaternion = quaternion.view(-1, 4, size)
quaternion = torch.sum(quaternion, 1)
return quaternion
@staticmethod
def vec_vec_wise_multiplication(q, p): # vector * vector
normalized_p = QuatRE.normalization(p) # bs x 4dim
q_r, q_i, q_j, q_k = QuatRE.make_wise_quaternion(q) # bs x 4dim
qp_r = QuatRE.get_quaternion_wise_mul(q_r * normalized_p) # qrpr−qipi−qjpj−qkpk
qp_i = QuatRE.get_quaternion_wise_mul(q_i * normalized_p) # qipr+qrpi−qkpj+qjpk
qp_j = QuatRE.get_quaternion_wise_mul(q_j * normalized_p) # qjpr+qkpi+qrpj−qipk
qp_k = QuatRE.get_quaternion_wise_mul(q_k * normalized_p) # qkpr−qjpi+qipj+qrpk
return torch.cat([qp_r, qp_i, qp_j, qp_k], dim=1)
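    # Scoring (see _calc below): heads and tails are first rotated by their
    # relation-specific quaternions (Whr, Wtr), the rotated head is composed with
    # the relation embedding via the Hamilton product, and the score is the
    # negative inner product with the rotated tail.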
@staticmethod
def _calc(h, r, t, hr, tr):
h_r = QuatRE.vec_vec_wise_multiplication(h, hr)
t_r = QuatRE.vec_vec_wise_multiplication(t, tr)
hrr = QuatRE.vec_vec_wise_multiplication(h_r, r)
hrt = hrr * t_r
return -torch.sum(hrt, -1)
@staticmethod
def regularization(quaternion): # vectorized quaternion bs x 4dim
size = quaternion.size(1) // 4
r, i, j, k = torch.split(quaternion, size, dim=1)
return torch.mean(r ** 2) + torch.mean(i ** 2) + torch.mean(j ** 2) + torch.mean(k ** 2)
def loss(self, score, regul):
return torch.mean(self.criterion(score * self.batch_y)) + self.config.lmbda * regul
def forward(self):
h = self.ent_embeddings(self.batch_h)
r = self.rel_embeddings(self.batch_r)
t = self.ent_embeddings(self.batch_t)
hr = self.Whr(self.batch_r)
tr = self.Wtr(self.batch_r)
score = QuatRE._calc(h, r, t, hr, tr)
regul = self.regularization(h) + self.regularization(r) + self.regularization(t) + self.regularization(hr) + self.regularization(tr)
return self.loss(score, regul)
def predict(self):
h = self.ent_embeddings(self.batch_h)
r = self.rel_embeddings(self.batch_r)
t = self.ent_embeddings(self.batch_t)
hr = self.Whr(self.batch_r)
tr = self.Wtr(self.batch_r)
score = QuatRE._calc(h, r, t, hr, tr)
return score.cpu().data.numpy()
|
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tencent is pleased to support the open source community by making behaviac available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at http://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef GLOBALS_H
#define GLOBALS_H
#include "GameObject.h"
#include "WorldState.h"
#include "Renderer.h"
#include "GameLogic.h"
namespace framework
{
//these represent the overall context
/// The current state of the level
extern WorldState* ws;
/// Renders to the GraphicContext
extern Renderer* renderer;
/// Accepts player input and drives the event loop
extern GameLogic* gl;
}
#endif
|
// // C language
// int a = 123;
// // JS language
// var a = 123
// var b = "123"
// var c = function apple(){
// console.log('apple');
// }
// let d = 456
// let e = "456"
// let f = function orange(){
// console.log("orange");
// }
// function lemon(){
// console.log("lemon")
// }
// lemon()
// let le = function banana(){
// console.log("banana")
// }
// le();
// banana()
// let le2= function(){
// console.log("banana le2")
// }
// le2()
// function apple2(){
// let name= "apple2";
// function apple2Son(){
// console.log("apple2Son");
// }
// apple2Son()
// return {
// apple2Son: apple2Son
// }
// }
// apple2().apple2Son()
// var a1 = 123
// // syntactic sugar
// var a2 = {}
// console.log(typeof a1)
// console.log(typeof a2)
// var a3 =Object.create(null);
// var a4 = function (){
// }
//es6
// arrow functions
// var a5 = (x,y)=>{
// console.log(x+y)
// }
// a5(5,4)
// console.log(typeof a5)
// var a5 = function apple5(){
// console.log('apple5');
// return 100;
// }()
// // apple5()
// console.log(a5);
// closures
// (function(x,y){
// console.log("江西软件大学")
// console.log(x+y)
// })(3,6)
// prototypes
// class Furit{
function Furit(){
this.name = "Furit"
console.log("this is furit");
}
// }
Furit.prototype.apple= function apple(){
console.log("this is apple");
}
var a88 = new Furit();
console.log(a88.apple())
// a bit of Java code
/**
*
* package test;
public class TestMain {
public static void main(String[] args) {
// TODO Auto-generated method stub
System.out.println("this is java");
Furit apple = new Furit("苹果",1);
Furit orange = new Furit("橘子",2);
}
}
class Furit {
private String name;
private int age ;
public Furit(String name, int age) {
this.name=name;
this.age = age;
}
public void printName() {
System.out.println(this.name);
}
}
*/
// function Person(name,height){
// this.name=name;
// this.height=height;
// }
// Person.prototype.hobby=function(){
// return 'watching movies';
// }
// var boy=new Person('keith',180);
// var girl=new Person('rascal',153);
// console.log(boy.name); //'keith'
// console.log(girl.name); //'rascal'
// console.log(boy.hobby===girl.hobby); //true
|
/**
* author Christopher Blum
* - based on the idea of Remy Sharp, http://remysharp.com/2009/01/26/element-in-view-event-plugin/
* - forked from http://github.com/zuk/jquery.inview/
*/
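// Usage sketch (assumes jQuery and this plugin are loaded; the selector and class
// names are illustrative only):
//   $('.lazy').bind('inview', function(event, isInView, visiblePartX, visiblePartY) {
//     // visiblePartX/Y report which parts are visible, e.g. 'top', 'left', 'both'
//     if (isInView) { $(this).addClass('in-viewport'); }
//   });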
(function ($) {
var inviewObjects = {}, viewportSize, viewportOffset,
d = document, w = window, documentElement = d.documentElement, expando = $.expando;
$.event.special.inview = {
add: function(data) {
inviewObjects[data.guid + "-" + this[expando]] = { data: data, $element: $(this) };
},
remove: function(data) {
try { delete inviewObjects[data.guid + "-" + this[expando]]; } catch(e) {}
}
};
function getViewportSize() {
var mode, domObject, size = { height: w.innerHeight, width: w.innerWidth };
// if this is correct then return it. iPad has compat Mode, so will
// go into check clientHeight/clientWidth (which has the wrong value).
if (!size.height) {
mode = d.compatMode;
if (mode || !$.support.boxModel) { // IE, Gecko
domObject = mode === 'CSS1Compat' ?
documentElement : // Standards
d.body; // Quirks
size = {
height: domObject.clientHeight,
width: domObject.clientWidth
};
}
}
return size;
}
function getViewportOffset() {
return {
top: w.pageYOffset || documentElement.scrollTop || d.body.scrollTop,
left: w.pageXOffset || documentElement.scrollLeft || d.body.scrollLeft
};
}
function checkInView() {
var $elements = $(), elementsLength, i = 0;
$.each(inviewObjects, function(i, inviewObject) {
var selector = inviewObject.data.selector,
$element = inviewObject.$element;
$elements = $elements.add(selector ? $element.find(selector) : $element);
});
elementsLength = $elements.length;
if (elementsLength) {
viewportSize = viewportSize || getViewportSize();
viewportOffset = viewportOffset || getViewportOffset();
for (; i<elementsLength; i++) {
// Ignore elements that are not in the DOM tree
if (!$.contains(documentElement, $elements[i])) {
continue;
}
var $element = $($elements[i]),
elementSize = { height: $element.height(), width: $element.width() },
elementOffset = $element.offset(),
inView = $element.data('inview'),
visiblePartX,
visiblePartY,
visiblePartsMerged;
        // Don't ask me why because I haven't figured it out yet:
// viewportOffset and viewportSize are sometimes suddenly null in Firefox 5.
// Even though it sounds weird:
        // It seems that the execution of this function is interfered with by the onresize/onscroll event
// where viewportOffset and viewportSize are unset
if (!viewportOffset || !viewportSize) {
return;
}
if (elementOffset.top + elementSize.height > viewportOffset.top &&
elementOffset.top < viewportOffset.top + viewportSize.height &&
elementOffset.left + elementSize.width > viewportOffset.left &&
elementOffset.left < viewportOffset.left + viewportSize.width) {
visiblePartX = (viewportOffset.left > elementOffset.left ?
'right' : (viewportOffset.left + viewportSize.width) < (elementOffset.left + elementSize.width) ?
'left' : 'both');
visiblePartY = (viewportOffset.top > elementOffset.top ?
'bottom' : (viewportOffset.top + viewportSize.height) < (elementOffset.top + elementSize.height) ?
'top' : 'both');
visiblePartsMerged = visiblePartX + "-" + visiblePartY;
if (!inView || inView !== visiblePartsMerged) {
$element.data('inview', visiblePartsMerged).trigger('inview', [true, visiblePartX, visiblePartY]);
}
} else if (inView) {
$element.data('inview', false).trigger('inview', [false]);
}
}
}
}
$(w).bind("scroll resize", function() {
viewportSize = viewportOffset = null;
});
// IE < 9 scrolls to focused elements without firing the "scroll" event
if (!documentElement.addEventListener && documentElement.attachEvent) {
documentElement.attachEvent("onfocusin", function() {
viewportOffset = null;
});
}
// Use setInterval in order to also make sure this captures elements within
// "overflow:scroll" elements or elements that appeared in the dom tree due to
// dom manipulation and reflow
// old: $(window).scroll(checkInView);
//
// By the way, iOS (iPad, iPhone, ...) seems to not execute, or at least delays
// intervals while the user scrolls. Therefore the inview event might fire a bit late there
setInterval(checkInView, 250);
})(jQuery); |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] in ('publish', 'release'):
raise Exception('this is a test app, do not release it!')
readme = 'A simple test application to test djactasauth'
setup(
name='testapp',
version='0.0.0',
description=readme,
long_description=readme,
author='Paessler AG',
url='https://github.com/PaesslerAG/django-act-as-auth',
packages=[
'testapp',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-act-as-auth',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
)
|
import React, { PropTypes } from 'react';
class NavHeader extends React.Component {
handleBack(){
this.context.router.goBack();
}
render () {
return(
<div className="nav-header">
<p onClick={this.handleBack.bind(this)}>
<span className="glyphicon glyphicon-arrow-left" aria-hidden="true">back</span></p>
<h3>xixilide@{this.props.title}</h3>
<span className="glyphicon glyphicon-record"></span>
</div>
)
}
}
NavHeader.contextTypes = {
    router: PropTypes.object.isRequired
}
export default NavHeader;
|
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
app_name = 'djangoapp'
urlpatterns = [
# route is a string contains a URL pattern
# view refers to the view function
# name the URL
# path for about view
path(route='about', view=views.about, name='about'),
# path for contact us view
path(route='contact', view=views.contact, name='contact'),
# path for registration
path(route='registration', view=views.registration_request, name='registration'),
# path for login
path(route='login', view=views.login_request, name='login'),
# path for logout
path(route='logout', view=views.logout_request, name='logout'),
path(route='', view=views.get_dealerships, name='index'),
# path for dealer reviews view
path(route='dealer/<int:dealer_id>/', view=views.get_dealer_details, name='dealer_details'),
# path for add a review view
path(route='addreview/<int:dealer_id>/', view=views.add_review, name='addreview')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
import unittest
from datetime import datetime
from ocdb.core.models import Dataset, ISSUE_TYPE_ERROR, ISSUE_TYPE_WARNING
from ocdb.core.val._meta_field_compare_rule import MetaFieldCompareRule
from tests.core.val._mock_library import MockLibrary
class MetaFieldCompareRuleTest(unittest.TestCase):
def setUp(self):
self._lib = MockLibrary()
def test_float_larger_equal_success(self):
rule = MetaFieldCompareRule("north_lat", "south_lat", ">=", error="@south_north_mismatch", data_type="number")
dataset = Dataset({"north_lat": "34.56", "south_lat": "28.33"}, [])
self.assertIsNone(rule.eval(dataset, self._lib))
def test_float_larger_equal_error(self):
rule = MetaFieldCompareRule("north_lat", "south_lat", ">=", error="@south_north_mismatch", data_type="number")
dataset = Dataset({"north_lat": "11.56", "south_lat": "28.33"}, [])
issue = rule.eval(dataset, self._lib)
self.assertIsNotNone(issue)
self.assertEqual(ISSUE_TYPE_ERROR, issue.type)
self.assertEqual("@south_north_mismatch", issue.description)
def test_float_not_equal_warning(self):
rule = MetaFieldCompareRule("north_lat", "south_lat", "!=", warning="should not be equal", data_type="number")
dataset = Dataset({"north_lat": "28.33", "south_lat": "28.33"}, [])
issue = rule.eval(dataset, self._lib)
self.assertIsNotNone(issue)
self.assertEqual(ISSUE_TYPE_WARNING, issue.type)
self.assertEqual("should not be equal", issue.description)
def test_int_less_than_error(self):
rule = MetaFieldCompareRule("int_1", "int_2", "<", warning="should be smaller", data_type="number")
dataset = Dataset({"int_1": "16", "int_2": "15"}, [])
issue = rule.eval(dataset, self._lib)
self.assertIsNotNone(issue)
self.assertEqual(ISSUE_TYPE_WARNING, issue.type)
self.assertEqual("should be smaller", issue.description)
def test_int_equal_invalid_field(self):
rule = MetaFieldCompareRule("int_1", "int_2", "==", error="must be same", data_type="number")
dataset = Dataset({"int_1": "16"}, [])
issue = rule.eval(dataset, self._lib)
self.assertIsNotNone(issue)
self.assertEqual(ISSUE_TYPE_ERROR, issue.type)
self.assertEqual("Requested field not contained in metadata: int_2", issue.description)
def test_date_smaller_equal_success(self):
rule = MetaFieldCompareRule("start_date", "end_date", "<=", error="end date before start date", data_type="date")
dataset = Dataset({"start_date": "20080416", "end_date": "20080416"}, [])
self.assertIsNone(rule.eval(dataset, self._lib))
def test_date_smaller_equal_failure(self):
rule = MetaFieldCompareRule("start_date", "end_date", "<", error="end date before start date", data_type="date")
dataset = Dataset({"start_date": "20080416", "end_date": "20080416"}, [])
issue = rule.eval(dataset, self._lib)
self.assertIsNotNone(issue)
self.assertEqual("end date before start date", issue.description)
self.assertEqual("ERROR", issue.type)
def test_extract_value_not_present(self):
metadata = {"bla": "whocares"}
rule = MetaFieldCompareRule("north_lat", "south_lat", ">=", error="@south_north_mismatch", data_type="number")
self.assertIsNone(rule._extract_value("north_lat", metadata))
def test_extract_value_number(self):
metadata = {"south_lat": "67.555"}
rule = MetaFieldCompareRule("north_lat", "south_lat", ">=", error="@south_north_mismatch", data_type="number")
self.assertEqual(67.555, rule._extract_value("south_lat", metadata))
def test_extract_value_number_with_unit(self):
metadata = {"south_lat": "68.666[DEG]"}
rule = MetaFieldCompareRule("north_lat", "south_lat", ">=", error="@south_north_mismatch", data_type="number")
self.assertEqual(68.666, rule._extract_value("south_lat", metadata))
def test_extract_value_date(self):
metadata = {"start_date": "20121113"}
rule = MetaFieldCompareRule("end_date", "start_date", ">=", error="@whatever", data_type="date")
self.assertEqual(datetime(2012, 11, 13), rule._extract_value("start_date", metadata))
def test_convert_date_string(self):
self.assertEqual("2008-09-23", MetaFieldCompareRule._convert_date_string("20080923"))
|
/*
* Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#import "../AmazonServiceRequestConfig.h"
/**
* Reset Image Attribute Request
*/
@interface EC2ResetImageAttributeRequest:AmazonServiceRequestConfig
{
NSString *imageId;
NSString *attribute;
}
/**
* The ID of the AMI whose attribute is being reset.
*/
@property (nonatomic, retain) NSString *imageId;
/**
* The name of the attribute being reset. <p> Available attribute names:
* <code>launchPermission</code>
*/
@property (nonatomic, retain) NSString *attribute;
/**
* Default constructor for a new ResetImageAttributeRequest object. Callers should use the
* property methods to initialize this object after creating it.
*/
-(id)init;
/**
* Constructs a new ResetImageAttributeRequest object.
* Callers should use properties to initialize any additional object members.
*
* @param theImageId The ID of the AMI whose attribute is being reset.
* @param theAttribute The name of the attribute being reset. <p>
* Available attribute names: <code>launchPermission</code>
*/
-(id)initWithImageId:(NSString *)theImageId andAttribute:(NSString *)theAttribute;
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*/
-(NSString *)description;
@end
|
"""Test module for the overall module."""
|
import re
import mlflow_extend
def test_version_exists() -> None:
assert hasattr(mlflow_extend, "__version__")
def test_version_format_is_valid() -> None:
assert re.search(r"^\d+\.\d+\.\d+$", mlflow_extend.__version__) is not None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy:
def __init__(self, target_object):
# WRITE CODE HERE
#initialize '_obj' attribute last. Trust me on this!
self._obj = target_object
# WRITE CODE HERE
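    # One possible completion, shown only as a commented sketch (the koan expects
    # you to write your own; names such as _messages are this sketch's choice,
    # and its __init__ would replace the skeleton above):
    #
    # def __init__(self, target_object):
    #     object.__setattr__(self, '_messages', [])
    #     object.__setattr__(self, '_obj', target_object)
    #
    # def __getattr__(self, name):
    #     self._messages.append(name)
    #     return getattr(self._obj, name)
    #
    # def __setattr__(self, name, value):
    #     self._messages.append(name)
    #     setattr(self._obj, name, value)
    #
    # def messages(self):
    #     return self._messages
    #
    # def was_called(self, name):
    #     return name in self._messages
    #
    # def number_of_times_called(self, name):
    #     return self._messages.count(name)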
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
def test_proxy_method_returns_wrapped_object(self):
# NOTE: The Television class is defined below
tv = Proxy(Television())
self.assertTrue(isinstance(tv, Proxy))
def test_tv_methods_still_perform_their_function(self):
tv = Proxy(Television())
tv.channel = 10
tv.power()
self.assertEqual(10, tv.channel)
self.assertTrue(tv.is_on())
def test_proxy_records_messages_sent_to_tv(self):
tv = Proxy(Television())
tv.power()
tv.channel = 10
self.assertEqual(['power', 'channel'], tv.messages())
def test_proxy_handles_invalid_messages(self):
tv = Proxy(Television())
ex = None
with self.assertRaises(AttributeError):
tv.no_such_method()
def test_proxy_reports_methods_have_been_called(self):
tv = Proxy(Television())
tv.power()
tv.power()
self.assertTrue(tv.was_called('power'))
self.assertFalse(tv.was_called('channel'))
def test_proxy_counts_method_calls(self):
tv = Proxy(Television())
tv.power()
tv.channel = 48
tv.power()
self.assertEqual(2, tv.number_of_times_called('power'))
self.assertEqual(1, tv.number_of_times_called('channel'))
self.assertEqual(0, tv.number_of_times_called('is_on'))
def test_proxy_can_record_more_than_just_tv_objects(self):
proxy = Proxy("Py Ohio 2010")
result = proxy.upper()
self.assertEqual("PY OHIO 2010", result)
result = proxy.split()
self.assertEqual(["Py", "Ohio", "2010"], result)
self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television:
def __init__(self):
self._channel = None
self._power = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
def power(self):
if self._power == 'on':
self._power = 'off'
else:
self._power = 'on'
def is_on(self):
return self._power == 'on'
# Tests for the Television class. All of these tests should pass.
class TelevisionTest(Koan):
def test_it_turns_on(self):
tv = Television()
tv.power()
self.assertTrue(tv.is_on())
def test_it_also_turns_off(self):
tv = Television()
tv.power()
tv.power()
self.assertFalse(tv.is_on())
def test_edge_case_on_off(self):
tv = Television()
tv.power()
tv.power()
tv.power()
self.assertTrue(tv.is_on())
tv.power()
self.assertFalse(tv.is_on())
def test_can_set_the_channel(self):
tv = Television()
tv.channel = 11
self.assertEqual(11, tv.channel)
|
'use strict';
const deepCompare = (actual, expect) => actual === expect || Object.is(actual, expect) || (Object(actual) === actual && Object(expect) === expect) && (Array.isArray(actual) && Array.isArray(expect) && actual.length === expect.length && expect.every((expect, index) => deepCompare(actual[index], expect)) || Reflect.ownKeys(actual).length === Reflect.ownKeys(expect).length && Reflect.ownKeys(expect).every((key) => deepCompare(actual[key], expect[key])));
/**
* converts a value to it's truthiness
* @param {any} val - the value to convert
* @returns {string} "truey" or "falsey"
*/
const truthiness = (val) => {
return `${Boolean(val)}y`;
};
// returns an empty array for an empty array
const _1_arg = [];
const _1_expect = [];
const _1_test = deepCompare(_1_arg.map(truthiness), _1_expect) ? 'pass' : 'fail';
console.assert(_1_test, 'Test 1');
// works when all values are truthy
const _2_arg = [100, 'hello', true];
const _2_expect = ['truey', 'truey', 'truey'];
const _2_test = deepCompare(_2_arg.map(truthiness), _2_expect) ? 'pass' : 'fail';
console.assert(_2_test, 'Test 2');
// works when all values are falsy
const _3_arg = ['', 0, NaN, null];
const _3_expect = ['falsey', 'falsey', 'falsey', 'falsey'];
const _3_test = deepCompare(_3_arg.map(truthiness), _3_expect) ? 'pass' : 'fail';
console.assert(_3_test, 'Test 3');
// works when there are mixed values
const _4_arg = [true, 0, NaN, 'hello'];
const _4_expect = ['truey', 'falsey', 'falsey', 'truey'];
const _4_test = deepCompare(_4_arg.map(truthiness), _4_expect) ? 'pass' : 'fail';
console.assert(_4_test, 'Test 4');
|
"""Commands part of Websocket API."""
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_READ
from homeassistant.const import MATCH_ALL, EVENT_TIME_CHANGED, EVENT_STATE_CHANGED
from homeassistant.core import callback, DOMAIN as HASS_DOMAIN
from homeassistant.exceptions import Unauthorized, ServiceNotFound, HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.helpers.event import async_track_state_change
from . import const, decorators, messages
@callback
def async_register_commands(hass, async_reg):
"""Register commands."""
async_reg(hass, handle_subscribe_events)
async_reg(hass, handle_unsubscribe_events)
async_reg(hass, handle_call_service)
async_reg(hass, handle_get_states)
async_reg(hass, handle_get_services)
async_reg(hass, handle_get_config)
async_reg(hass, handle_ping)
async_reg(hass, handle_render_template)
def pong_message(iden):
"""Return a pong message."""
return {"id": iden, "type": "pong"}
@callback
@decorators.websocket_command(
{
vol.Required("type"): "subscribe_events",
vol.Optional("event_type", default=MATCH_ALL): str,
}
)
def handle_subscribe_events(hass, connection, msg):
"""Handle subscribe events command.
Async friendly.
"""
from .permissions import SUBSCRIBE_WHITELIST
event_type = msg["event_type"]
if event_type not in SUBSCRIBE_WHITELIST and not connection.user.is_admin:
raise Unauthorized
if event_type == EVENT_STATE_CHANGED:
@callback
def forward_events(event):
"""Forward state changed events to websocket."""
if not connection.user.permissions.check_entity(
event.data["entity_id"], POLICY_READ
):
return
connection.send_message(messages.event_message(msg["id"], event))
else:
@callback
def forward_events(event):
"""Forward events to websocket."""
if event.event_type == EVENT_TIME_CHANGED:
return
connection.send_message(messages.event_message(msg["id"], event.as_dict()))
connection.subscriptions[msg["id"]] = hass.bus.async_listen(
event_type, forward_events
)
connection.send_message(messages.result_message(msg["id"]))
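# Example frame handled by handle_subscribe_events (illustrative values only):
#   {"id": 18, "type": "subscribe_events", "event_type": "state_changed"}
# The connection replies with a result message for id 18 and then streams an
# event message for every matching event the user is permitted to read.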
@callback
@decorators.websocket_command(
{
vol.Required("type"): "unsubscribe_events",
vol.Required("subscription"): cv.positive_int,
}
)
def handle_unsubscribe_events(hass, connection, msg):
"""Handle unsubscribe events command.
Async friendly.
"""
subscription = msg["subscription"]
if subscription in connection.subscriptions:
connection.subscriptions.pop(subscription)()
connection.send_message(messages.result_message(msg["id"]))
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Subscription not found."
)
)
@decorators.async_response
@decorators.websocket_command(
{
vol.Required("type"): "call_service",
vol.Required("domain"): str,
vol.Required("service"): str,
vol.Optional("service_data"): dict,
}
)
async def handle_call_service(hass, connection, msg):
"""Handle call service command.
Async friendly.
"""
blocking = True
if msg["domain"] == HASS_DOMAIN and msg["service"] in ["restart", "stop"]:
blocking = False
try:
await hass.services.async_call(
msg["domain"],
msg["service"],
msg.get("service_data"),
blocking,
connection.context(msg),
)
connection.send_message(messages.result_message(msg["id"]))
except ServiceNotFound as err:
if err.domain == msg["domain"] and err.service == msg["service"]:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Service not found."
)
)
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err)
)
)
except HomeAssistantError as err:
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err))
)
except Exception as err: # pylint: disable=broad-except
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_UNKNOWN_ERROR, str(err))
)
@callback
@decorators.websocket_command({vol.Required("type"): "get_states"})
def handle_get_states(hass, connection, msg):
"""Handle get states command.
Async friendly.
"""
if connection.user.permissions.access_all_entities("read"):
states = hass.states.async_all()
else:
entity_perm = connection.user.permissions.check_entity
states = [
state
for state in hass.states.async_all()
if entity_perm(state.entity_id, "read")
]
connection.send_message(messages.result_message(msg["id"], states))
@decorators.async_response
@decorators.websocket_command({vol.Required("type"): "get_services"})
async def handle_get_services(hass, connection, msg):
"""Handle get services command.
Async friendly.
"""
descriptions = await async_get_all_descriptions(hass)
connection.send_message(messages.result_message(msg["id"], descriptions))
@callback
@decorators.websocket_command({vol.Required("type"): "get_config"})
def handle_get_config(hass, connection, msg):
"""Handle get config command.
Async friendly.
"""
connection.send_message(messages.result_message(msg["id"], hass.config.as_dict()))
@callback
@decorators.websocket_command({vol.Required("type"): "ping"})
def handle_ping(hass, connection, msg):
"""Handle ping command.
Async friendly.
"""
connection.send_message(pong_message(msg["id"]))
@callback
@decorators.websocket_command(
{
vol.Required("type"): "render_template",
vol.Required("template"): cv.template,
vol.Optional("entity_ids"): cv.entity_ids,
vol.Optional("variables"): dict,
}
)
def handle_render_template(hass, connection, msg):
"""Handle render_template command.
Async friendly.
"""
template = msg["template"]
template.hass = hass
variables = msg.get("variables")
entity_ids = msg.get("entity_ids")
if entity_ids is None:
entity_ids = template.extract_entities(variables)
@callback
def state_listener(*_):
connection.send_message(
messages.event_message(
msg["id"], {"result": template.async_render(variables)}
)
)
if entity_ids and entity_ids != MATCH_ALL:
connection.subscriptions[msg["id"]] = async_track_state_change(
hass, entity_ids, state_listener
)
else:
connection.subscriptions[msg["id"]] = lambda: None
connection.send_result(msg["id"])
state_listener()
|
import logging
from config.configuration import Configuration
from geocodio import GeocodioClient
from geocodio.exceptions import GeocodioAuthError, GeocodioDataError, GeocodioServerError, GeocodioError
_LOGGER = logging.getLogger('mme')
class Geocoding:
_geocodio_client = None
def initialize_geocodio(config: Configuration):
Geocoding._geocodio_client = None
try:
if config.geocodio.enable:
Geocoding._geocodio_client = GeocodioClient(config.geocodio.api_key)
_LOGGER.info(f"Using the geocod.io service for reverse geocoding of locations")
except AttributeError:
_LOGGER.error(f"YAML file error setting up geocod.io reverse geocoding")
pass
def reverse_geocode(latitude: float, longitude: float) -> str:
if Geocoding._geocodio_client:
formatted_address = f"({latitude:.06f}, {longitude:.06f})"
try:
reversed = Geocoding._geocodio_client.reverse((latitude, longitude))
components = Geocoding._geocodio_client.parse(reversed.formatted_address).get('address_components', None)
if components:
formatted_address = f"{components.get('formatted_street')}, {components.get('city')}, {components.get('state')}"
except (GeocodioAuthError, GeocodioDataError, GeocodioServerError, GeocodioError) as e:
pass # _LOGGER.error(f"geocod.io reverse geocoding error: {e}")
finally:
return formatted_address
else:
return f"({latitude:.06f}, {longitude:.06f})"
def parse_address(address: str) -> dict:
address_components = None
if Geocoding._geocodio_client:
try:
address_components = Geocoding._geocodio_client.parse(address).get('address_components', None)
except (GeocodioAuthError, GeocodioDataError, GeocodioServerError, GeocodioError) as e:
_LOGGER.error(f"geocod.io parsing address error: {e}")
return address_components
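# Usage sketch (the methods above take no self and are called on the class; the
# Configuration is assumed to expose geocodio.enable and geocodio.api_key):
#   Geocoding.initialize_geocodio(config)
#   Geocoding.reverse_geocode(45.5231, -122.6765)
#   # falls back to "(45.523100, -122.676500)" if geocod.io is disabled or errors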
|
// Copyright 2007 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Defines a 2-element vector class that can be used for
* coordinate math, useful for animation systems and point manipulation.
*
* Vec2 objects inherit from goog.math.Coordinate and may be used wherever a
* Coordinate is required. Where appropriate, Vec2 functions accept both Vec2
* and Coordinate objects as input.
*
*/
goog.provide('goog.math.Vec2');
goog.require('goog.math');
goog.require('goog.math.Coordinate');
/**
* Class for a two-dimensional vector object and assorted functions useful for
* manipulating points.
*
* @param {number=} opt_x The x coordinate for the vector.
* @param {number=} opt_y The y coordinate for the vector.
* @constructor
* @extends {goog.math.Coordinate}
*/
goog.math.Vec2 = function(opt_x, opt_y) {
/**
* X-value
* @type {number}
*/
this.x = Number(opt_x) || 0;
/**
* Y-value
* @type {number}
*/
this.y = Number(opt_y) || 0;
};
goog.inherits(goog.math.Vec2, goog.math.Coordinate);
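// Example (illustrative only):
//   var v = goog.math.Vec2.sum(new goog.math.Vec2(1, 2), new goog.math.Vec2(3, 4)); // -> (4, 6)
//   v.scale(0.5).magnitude(); // -> sqrt(2*2 + 3*3) ~= 3.61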
/**
* @return {!goog.math.Vec2} A random unit-length vector.
*/
goog.math.Vec2.randomUnit = function() {
var angle = Math.random() * Math.PI * 2;
return new goog.math.Vec2(Math.cos(angle), Math.sin(angle));
};
/**
* @return {!goog.math.Vec2} A random vector inside the unit-disc.
*/
goog.math.Vec2.random = function() {
var mag = Math.sqrt(Math.random());
var angle = Math.random() * Math.PI * 2;
return new goog.math.Vec2(Math.cos(angle) * mag, Math.sin(angle) * mag);
};
/**
* Returns a new Vec2 object from a given coordinate.
* @param {!goog.math.Coordinate} a The coordinate.
* @return {!goog.math.Vec2} A new vector object.
*/
goog.math.Vec2.fromCoordinate = function(a) {
return new goog.math.Vec2(a.x, a.y);
};
/**
* @return {!goog.math.Vec2} A new vector with the same coordinates as this one.
*/
goog.math.Vec2.prototype.clone = function() {
return new goog.math.Vec2(this.x, this.y);
};
/**
* Returns the magnitude of the vector measured from the origin.
* @return {number} The length of the vector.
*/
goog.math.Vec2.prototype.magnitude = function() {
return Math.sqrt(this.x * this.x + this.y * this.y);
};
/**
* Returns the squared magnitude of the vector measured from the origin.
* NOTE(user): Leaving out the square root is not a significant
* optimization in JavaScript.
* @return {number} The length of the vector, squared.
*/
goog.math.Vec2.prototype.squaredMagnitude = function() {
return this.x * this.x + this.y * this.y;
};
/**
* Scales the current vector by a constant.
* @param {number} s The scale factor.
* @return {!goog.math.Vec2} The scaled vector.
*/
goog.math.Vec2.prototype.scale = function(s) {
this.x *= s;
this.y *= s;
return this;
};
/**
* Reverses the sign of the vector. Equivalent to scaling the vector by -1.
* @return {!goog.math.Vec2} The inverted vector.
*/
goog.math.Vec2.prototype.invert = function() {
this.x = -this.x;
this.y = -this.y;
return this;
};
/**
* Normalizes the current vector to have a magnitude of 1.
* @return {!goog.math.Vec2} The normalized vector.
*/
goog.math.Vec2.prototype.normalize = function() {
return this.scale(1 / this.magnitude());
};
/**
* Adds another vector to this vector in-place. Uses goog.math.Vec2.sum(a, b) to
* return a new vector.
* @param {!goog.math.Coordinate} b The vector to add.
* @return {!goog.math.Vec2} This vector with {@code b} added.
*/
goog.math.Vec2.prototype.add = function(b) {
this.x += b.x;
this.y += b.y;
return this;
};
/**
* Subtracts another vector from this vector in-place. Uses
* goog.math.Vec2.difference(a, b) to return a new vector.
* @param {!goog.math.Coordinate} b The vector to subtract.
* @return {!goog.math.Vec2} This vector with {@code b} subtracted.
*/
goog.math.Vec2.prototype.subtract = function(b) {
this.x -= b.x;
this.y -= b.y;
return this;
};
/**
* Compares this vector with another for equality.
* @param {!goog.math.Vec2} b The other vector.
* @return {boolean} Whether this vector has the same x and y as the given
* vector.
*/
goog.math.Vec2.prototype.equals = function(b) {
return this == b || !!b && this.x == b.x && this.y == b.y;
};
/**
* Returns the distance between two vectors.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {number} The distance.
*/
goog.math.Vec2.distance = goog.math.Coordinate.distance;
/**
* Returns the squared distance between two vectors.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {number} The squared distance.
*/
goog.math.Vec2.squaredDistance = goog.math.Coordinate.squaredDistance;
/**
* Compares vectors for equality.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {boolean} Whether the vectors have the same x and y coordinates.
*/
goog.math.Vec2.equals = goog.math.Coordinate.equals;
/**
* Returns the sum of two vectors as a new Vec2.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {!goog.math.Vec2} The sum vector.
*/
goog.math.Vec2.sum = function(a, b) {
return new goog.math.Vec2(a.x + b.x, a.y + b.y);
};
/**
* Returns the difference between two vectors as a new Vec2.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {!goog.math.Vec2} The difference vector.
*/
goog.math.Vec2.difference = function(a, b) {
return new goog.math.Vec2(a.x - b.x, a.y - b.y);
};
/**
* Returns the dot-product of two vectors.
* @param {!goog.math.Coordinate} a The first vector.
* @param {!goog.math.Coordinate} b The second vector.
* @return {number} The dot-product of the two vectors.
*/
goog.math.Vec2.dot = function(a, b) {
return a.x * b.x + a.y * b.y;
};
/**
* Returns a new Vec2 that is the linear interpolant between vectors a and b at
* scale-value x.
* @param {!goog.math.Coordinate} a Vector a.
* @param {!goog.math.Coordinate} b Vector b.
* @param {number} x The proportion between a and b.
* @return {!goog.math.Vec2} The interpolated vector.
*/
goog.math.Vec2.lerp = function(a, b, x) {
return new goog.math.Vec2(goog.math.lerp(a.x, b.x, x),
goog.math.lerp(a.y, b.y, x));
};
|
'use strict';
var fetch = require('../lib/index.js');
var wd = require('webdriver-client')({
platformName: 'desktop',
browserName: 'chrome'
});
describe('core testsuite', function() {
this.timeout(5 * 60 * 1000);
const driver = wd.initPromiseChain();
before(() => {
return driver
.initDriver()
.setWindowSize(1280, 800);
});
var request = {
comment: function getComment() {
return fetch.get('http://azu.github.io/promises-book/json/comment.json').then(JSON.parse);
},
people: function getPeople() {
return fetch.get('http://azu.github.io/promises-book/json/people.json').then(JSON.parse);
}
};
function main() {
return fetch.sequenceTasks([request.comment, request.people]);
}
it('#0 sequenceTasks should be send by the given sequence', function() {
driver.
get('http://azu.github.io/promises-book/json/comment.json');
fetch.sequenceTasks([request.comment, request.people])
.then(function (value) {
console.log(value);
}).catch(function(error){
console.error(error);
});
});
after((done) => {
return driver
.quit(done)
});
});
|
"""
Django settings for udemy project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import dj_database_url
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=True, cast=bool)
ALLOWED_HOSTS = ['*']
# Application definition
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = []
LOCAL_APPS = [
'core',
'courses',
]
INSTALLED_APPS = LOCAL_APPS + THIRD_PARTY_APPS + DEFAULT_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'udemy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'udemy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default=config('DATABASE_URL'))
}
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'database.sqlite3',
}
}
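# Illustrative .env values read via python-decouple and dj_database_url
# (examples only, not part of the project):
#   SECRET_KEY=change-me
#   DEBUG=False
#   DATABASE_URL=postgres://user:password@localhost:5432/udemy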
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Telegram configuration
TELEGRAM_TOKEN = config('TELEGRAM_TOKEN', default='')
# Udemy configuration
UDEMY_URL = config('UDEMY_URL', default='https://www.udemy.com')
UDEMY_CLIENT_ID = config('UDEMY_CLIENT_ID', default='')
UDEMY_CLIENT_SECRET = config('UDEMY_CLIENT_SECRET', default='')
# Twitter configuration
TWITTER_API_KEY = config('TWITTER_API_KEY', default='')
TWITTER_API_SECRET = config('TWITTER_API_SECRET', default='')
TWITTER_OAUTH_TOKEN = config('TWITTER_OAUTH_TOKEN', default='')
TWITTER_OAUTH_TOKEN_SECRET = config('TWITTER_OAUTH_TOKEN_SECRET', default='') |
#!/usr/bin/env python3
from tutorial import Calculator
from tutorial.ttypes import InvalidOperation, Operation
from shared.ttypes import SharedStruct
class CalculatorHandler:
def __init__(self):
self.log = {}
def ping(self):
print('ping()')
def add(self, n1, n2):
print('add(%d,%d)' % (n1, n2))
return n1 + n2
def calculate(self, logid, work):
print('calculate(%d, %r)' % (logid, work))
if work.op == Operation.ADD:
val = work.num1 + work.num2
elif work.op == Operation.SUBTRACT:
val = work.num1 - work.num2
elif work.op == Operation.MULTIPLY:
val = work.num1 * work.num2
elif work.op == Operation.DIVIDE:
if work.num2 == 0:
x = InvalidOperation()
x.whatOp = work.op
x.why = 'Cannot divide by 0'
raise x
val = work.num1 / work.num2
else:
x = InvalidOperation()
x.whatOp = work.op
x.why = 'Invalid operation'
raise x
log = SharedStruct()
log.key = logid
log.value = '%d' % (val)
self.log[logid] = log
return val
def getStruct(self, key):
print('getStruct(%d)' % (key))
return self.log[key]
def zip(self):
print('zip()')
if __name__ == '__main__':
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
transport = TSocket.TServerSocket(host='127.0.0.1', port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
server.serve()
# You could do one of these for a multithreaded server
# server = TServer.TThreadedServer(
# processor, transport, tfactory, pfactory)
|
from urlparse import urlsplit, urlunsplit
from storages.backends.s3boto import S3BotoStorage
class ProtocolRelativeS3BotoStorage(S3BotoStorage):
"""Extends S3BotoStorage to return protocol-relative URLs
See: http://paulirish.com/2010/the-protocol-relative-url/
"""
def url(self, name):
"""Modifies return URLs to be protocol-relative."""
url = super(ProtocolRelativeS3BotoStorage, self).url(name)
parts = list(urlsplit(url))
parts[0] = ''
return urlunsplit(parts)
|
import React from "react";
import { graphql, useStaticQuery } from "gatsby";
import styled from "styled-components";
// Components
import { FaFacebookF, FaLinkedinIn, FaTwitter } from "react-icons/fa";
const q = graphql`
query {
site {
siteMetadata {
social {
twitter
linkedin
facebook
}
}
}
}
`;
const Social = () => {
const data = useStaticQuery(q);
const { facebook, linkedin, twitter } = data.site.siteMetadata.social;
return (
<SocialContainer className="social-container">
<li className="social-link social-link__facebook">
<a href={facebook}>
<FaFacebookF />
</a>
</li>
<li className="social-link social-link__linkedin">
<a href={linkedin}>
<FaLinkedinIn />
</a>
</li>
<li className="social-link social-link__twitter">
<a href={twitter}>
<FaTwitter />
</a>
</li>
</SocialContainer>
);
};
export default Social;
const SocialContainer = styled.ul`
display: flex;
list-style-type: none;
padding: 0;
.social-link {
margin-right: 0.5rem;
svg {
fill: #ffffff;
width: 3rem;
height: 3rem;
padding: 1rem;
}
}
.social-link__facebook {
svg {
background: #3a579a;
}
}
.social-link__linkedin {
svg {
background: #127bb6;
}
}
.social-link__twitter {
svg {
background: #4ab3f4;
}
}
`;
|
export function finalController($scope, $rootScope, scopePayload, AnimationService){
$scope.$parent.payload = scopePayload;
AnimationService.animate(scopePayload.template);
$scope.$parent.segueControl ='ready';
// Maybe we use it for the final slide
}
finalController.$inject = ['$scope', '$rootScope', 'scopePayload', 'AnimationService'];
|
""" Code for loading data. """
import numpy as np
import os
import random
import tensorflow as tf
from tensorflow.python.platform import flags
from utils import get_images
FLAGS = flags.FLAGS
class DataGenerator(object):
"""
Data Generator capable of generating batches of sinusoid or Omniglot data.
A "class" is considered a class of omniglot digits or a particular sinusoid function.
"""
def __init__(self, num_samples_per_class, batch_size, config={}):
"""
Args:
num_samples_per_class: num samples to generate per class in one batch
batch_size: size of meta batch size (e.g. number of functions)
"""
self.batch_size = batch_size
self.num_samples_per_class = num_samples_per_class
self.num_classes = 1 # by default 1 (only relevant for classification problems)
if FLAGS.datasource == 'sinusoid':
self.generate = self.generate_sinusoid_batch
self.amp_range = config.get('amp_range', [0.1, 5.0])
self.phase_range = config.get('phase_range', [0, np.pi])
self.input_range = config.get('input_range', [-5.0, 5.0])
self.dim_input = 1
self.dim_output = 1
elif 'omniglot' in FLAGS.datasource:
self.num_classes = config.get('num_classes', FLAGS.num_classes)
self.img_size = config.get('img_size', (28, 28))
self.dim_input = np.prod(self.img_size)
self.dim_output = self.num_classes
# data that is pre-resized using PIL with lanczos filter
data_folder = config.get('data_folder', './data/omniglot_resized')
character_folders = [os.path.join(data_folder, family, character) \
for family in os.listdir(data_folder) \
if os.path.isdir(os.path.join(data_folder, family)) \
for character in os.listdir(os.path.join(data_folder, family))]
random.seed(1)
random.shuffle(character_folders)
num_val = 100
num_train = config.get('num_train', 1200) - num_val
self.metatrain_character_folders = character_folders[:num_train]
if FLAGS.test_set:
self.metaval_character_folders = character_folders[num_train+num_val:]
else:
self.metaval_character_folders = character_folders[num_train:num_train+num_val]
self.rotations = config.get('rotations', [0, 90, 180, 270])
elif FLAGS.datasource == 'miniimagenet':
self.num_classes = config.get('num_classes', FLAGS.num_classes)
self.img_size = config.get('img_size', (84, 84))
self.dim_input = np.prod(self.img_size)*3
self.dim_output = self.num_classes
metatrain_folder = config.get('metatrain_folder', './data/miniImagenet/train')
if FLAGS.test_set:
metaval_folder = config.get('metaval_folder', './data/miniImagenet/test')
else:
metaval_folder = config.get('metaval_folder', './data/miniImagenet/val')
metatrain_folders = [os.path.join(metatrain_folder, label) \
for label in os.listdir(metatrain_folder) \
if os.path.isdir(os.path.join(metatrain_folder, label)) \
]
metaval_folders = [os.path.join(metaval_folder, label) \
for label in os.listdir(metaval_folder) \
if os.path.isdir(os.path.join(metaval_folder, label)) \
]
self.metatrain_character_folders = metatrain_folders
self.metaval_character_folders = metaval_folders
self.rotations = config.get('rotations', [0])
else:
raise ValueError('Unrecognized data source')
def make_data_tensor(self, train=True):
if train:
folders = self.metatrain_character_folders
# number of tasks, not number of meta-iterations. (divide by metabatch size to measure)
num_total_batches = 200000
else:
folders = self.metaval_character_folders
num_total_batches = 600
# make list of files
print('Generating filenames')
all_filenames = []
for _ in range(num_total_batches):
sampled_character_folders = random.sample(folders, self.num_classes)
random.shuffle(sampled_character_folders)
labels_and_images = get_images(sampled_character_folders, range(self.num_classes), nb_samples=self.num_samples_per_class, shuffle=False)
# make sure the above isn't randomized order
labels = [li[0] for li in labels_and_images]
filenames = [li[1] for li in labels_and_images]
all_filenames.extend(filenames)
# make queue for tensorflow to read from
filename_queue = tf.train.string_input_producer(tf.convert_to_tensor(all_filenames), shuffle=False)
print('Generating image processing ops')
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
if FLAGS.datasource == 'miniimagenet':
image = tf.image.decode_jpeg(image_file, channels=3)
image.set_shape((self.img_size[0],self.img_size[1],3))
image = tf.reshape(image, [self.dim_input])
image = tf.cast(image, tf.float32) / 255.0
else:
image = tf.image.decode_png(image_file)
image.set_shape((self.img_size[0],self.img_size[1],1))
image = tf.reshape(image, [self.dim_input])
image = tf.cast(image, tf.float32) / 255.0
image = 1.0 - image # invert
num_preprocess_threads = 1 # TODO - enable this to be set to >1
min_queue_examples = 256
examples_per_batch = self.num_classes * self.num_samples_per_class
batch_image_size = self.batch_size * examples_per_batch
print('Batching images')
images = tf.train.batch(
[image],
batch_size = batch_image_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_image_size,
)
all_image_batches, all_label_batches = [], []
print('Manipulating image data to be right shape')
for i in range(self.batch_size):
image_batch = images[i*examples_per_batch:(i+1)*examples_per_batch]
if FLAGS.datasource == 'omniglot':
# omniglot augments the dataset by rotating digits to create new classes
# get rotation per class (e.g. 0,1,2,0,0 if there are 5 classes)
rotations = tf.multinomial(tf.log([[1., 1.,1.,1.]]), self.num_classes)
label_batch = tf.convert_to_tensor(labels)
new_list, new_label_list = [], []
for k in range(self.num_samples_per_class):
class_idxs = tf.range(0, self.num_classes)
class_idxs = tf.random_shuffle(class_idxs)
true_idxs = class_idxs*self.num_samples_per_class + k
new_list.append(tf.gather(image_batch,true_idxs))
if FLAGS.datasource == 'omniglot': # and FLAGS.train:
new_list[-1] = tf.stack([tf.reshape(tf.image.rot90(
tf.reshape(new_list[-1][ind], [self.img_size[0],self.img_size[1],1]),
k=tf.cast(rotations[0,class_idxs[ind]], tf.int32)), (self.dim_input,))
for ind in range(self.num_classes)])
new_label_list.append(tf.gather(label_batch, true_idxs))
new_list = tf.concat(new_list, 0) # has shape [self.num_classes*self.num_samples_per_class, self.dim_input]
new_label_list = tf.concat(new_label_list, 0)
all_image_batches.append(new_list)
all_label_batches.append(new_label_list)
all_image_batches = tf.stack(all_image_batches)
all_label_batches = tf.stack(all_label_batches)
all_label_batches = tf.one_hot(all_label_batches, self.num_classes)
return all_image_batches, all_label_batches
def generate_sinusoid_batch(self, train=True, input_idx=None):
        # Note: the train arg is not used here (but it is used by the omniglot method).
        # input_idx is used during qualitative testing -- it is the number of examples used for the grad update.
amp = np.random.uniform(self.amp_range[0], self.amp_range[1], [self.batch_size])
phase = np.random.uniform(self.phase_range[0], self.phase_range[1], [self.batch_size])
outputs = np.zeros([self.batch_size, self.num_samples_per_class, self.dim_output])
init_inputs = np.zeros([self.batch_size, self.num_samples_per_class, self.dim_input])
for func in range(self.batch_size):
init_inputs[func] = np.random.uniform(self.input_range[0], self.input_range[1], [self.num_samples_per_class, 1])
if input_idx is not None:
init_inputs[:,input_idx:,0] = np.linspace(self.input_range[0], self.input_range[1], num=self.num_samples_per_class-input_idx, retstep=False)
outputs[func] = amp[func] * np.sin(init_inputs[func]-phase[func])
return init_inputs, outputs, amp, phase
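if __name__ == "__main__":
    # Hedged sketch: reproduces the sinusoid sampling above without the FLAGS
    # machinery, so the shapes and broadcasting can be checked in isolation.
    batch_size, num_samples = 4, 10
    amp = np.random.uniform(0.1, 5.0, size=batch_size)
    phase = np.random.uniform(0, np.pi, size=batch_size)
    inputs = np.random.uniform(-5.0, 5.0, size=(batch_size, num_samples, 1))
    outputs = amp[:, None, None] * np.sin(inputs - phase[:, None, None])
    print(inputs.shape, outputs.shape)  # (4, 10, 1) (4, 10, 1)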
|
import _pt from "prop-types";
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
import React from 'react';
export default function withIcon(name) {
return WrappedComponent => {
var _class, _temp;
return _temp = _class = class Icon extends React.Component {
render() {
const {
accessibilityLabel,
color,
decorative,
flip,
flipVertical,
inline,
size
} = this.props;
const props = {
focusable: 'false',
role: decorative ? 'presentation' : 'img',
style: {
height: size,
width: size,
display: inline ? 'inline' : 'block',
fill: color,
transform: flip || flipVertical ? "scale(" + (flip ? -1 : 1) + ", " + (flipVertical ? -1 : 1) + ")" : 'scale(1)',
// keep scale(1) for transition flipping
transition: 'transform 300ms ease-out'
}
};
if ("production" !== process.env.NODE_ENV) {
if (!accessibilityLabel && !decorative) {
// eslint-disable-next-line no-console
console.error('Missing `accessibilityLabel` or `decorative` for accessibility.');
}
if (accessibilityLabel && decorative) {
// eslint-disable-next-line no-console
console.error('Only one of `accessibilityLabel` or `decorative` may be used.');
}
}
if (decorative) {
props['aria-hidden'] = true;
}
if (accessibilityLabel) {
props['aria-label'] = accessibilityLabel;
}
return React.createElement(WrappedComponent, props);
}
}, _defineProperty(_class, "propTypes", {
accessibilityLabel: _pt.string,
decorative: _pt.bool,
flip: _pt.bool,
flipVertical: _pt.bool,
size: _pt.oneOfType([_pt.number, _pt.string]),
color: _pt.string,
inline: _pt.bool
}), _defineProperty(_class, "displayName", name), _defineProperty(_class, "WrappedComponent", WrappedComponent), _defineProperty(_class, "defaultProps", {
color: 'currentColor',
flip: false,
flipVertical: false,
inline: false,
size: '1em'
}), _temp;
};
} |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for low-level eager execution primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python import keras
class Tests(test.TestCase):
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MatMulCorrectResponse(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
a_100_by_784 = random_ops.random_uniform((100, 784))
b_100_by_784 = random_ops.random_uniform((100, 784))
ctx = context.context()
ctx.ensure_initialized()
self.assertAllClose(
math_ops.matmul(a_2_by_2, b_2_by_2),
pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, a_2_by_2,
b_2_by_2, "transpose_a", False,
"transpose_b", False))
self.assertAllClose(
math_ops.matmul(a_100_by_784, b_100_by_784, transpose_b=True),
pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, a_100_by_784,
b_100_by_784, "transpose_a", False,
"transpose_b", True))
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableMatMulCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
x = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, m, m,
"transpose_a", False, "transpose_b",
False)
y = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, a_2_by_2,
a_2_by_2, "transpose_a", False,
"transpose_b", False)
self.assertAllEqual(x, y)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_TapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
tape.watch(a_2_by_2)
z = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, a_2_by_2,
a_2_by_2, "transpose_a", False,
"transpose_b", False)
dz_dy = tape.gradient(z, [a_2_by_2])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
tape.watch(m)
z = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, m, m,
"transpose_a", False, "transpose_b",
False)
dz_dy = tape.gradient(z, [m])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
math_ops.add_n([a_2_by_2, b_2_by_2]),
pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name, "AddN",
None, None, [a_2_by_2, b_2_by_2]))
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"AddN", None, None,
[a_2_by_2, b_2_by_2])
z2 = math_ops.add_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1, [a_2_by_2])[0]
dz2_dy = tape.gradient(z2, [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
array_ops.identity_n([a_2_by_2, b_2_by_2]),
pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"IdentityN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"IdentityN", None, None,
[a_2_by_2, b_2_by_2])
z2 = array_ops.identity_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1[0], [a_2_by_2])[0]
dz2_dy = tape.gradient(z2[0], [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_InvalidInputs(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
ctx = context.context()
ctx.ensure_initialized()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
ctx_handle = ctx._handle # pylint: disable=protected-access
# Not enough base params
with self.assertRaisesRegexp(ValueError,
"at least 5 items in the input tuple"):
pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, "Identity")
# Not enough inputs
with self.assertRaisesRegexp(ValueError,
"Expected to be at least 6, was 5"):
pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx_handle, "Identity",
None, [])
# Bad type
with self.assertRaisesRegexp(TypeError, "expected a string for op_name"):
pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, ctx_handle,
None, [], a_2_by_2)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastPathExecute_InvalidAttributes(self):
split_dim = constant_op.constant(0, dtype=dtypes.int32)
value = constant_op.constant([0, 1, 2, 3], dtype=dtypes.float32)
ctx = context.context()
ctx.ensure_initialized()
ctx_handle = ctx._handle
with self.assertRaises(core._FallbackException):
pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, "Split",
None, None, split_dim, value,
"num_split", -1)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testInvalidNumOutputs(self):
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of -1 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of 0 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=0)
def testIsFunction(self):
ctx = context.context()
self.assertFalse(ctx.has_function("not_a_function"))
@def_function.function
def f():
return 1.
self.assertTrue(ctx.has_function(f.get_concrete_function().name))
def testEagerExecute_InvalidType(self):
# Test case for GitHub issue 26879.
value = keras.layers.Input((128, 128, 1), dtype="float32")
with self.assertRaisesRegexp(TypeError,
"Expected list for 'values' argument"):
_ = array_ops.stack(value, axis=1)
def testGraphResourceVariableRaisesFallback(self):
with ops.Graph().as_default():
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
ctx = context.context()
ctx.ensure_initialized()
with self.assertRaises(core._FallbackException):
pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name, "MatMul",
None, None, m, m, "transpose_a", False,
"transpose_b", False)
def testOpDefDefaultType(self):
im = np.random.randint(
low=0, high=65535, size=100, dtype=np.uint16).reshape(10, 10, 1)
context.ensure_initialized()
fastpath_dtype = test_ops.dtype_with_default_op(im).numpy()
slowpath_dtype = test_ops.dtype_with_default_op_eager_fallback(
im, None, context.context()).numpy()
# Ensure the fastpath and slowpath eager paths work.
self.assertEqual(fastpath_dtype, slowpath_dtype)
with ops.Graph().as_default(), self.cached_session():
graph_dtype_symbolic = test_ops.dtype_with_default_op(im)
graph_dtype = self.evaluate(graph_dtype_symbolic)
# Ensure the eager path matches the graph path.
self.assertEqual(fastpath_dtype, graph_dtype)
# Unfortunately, as of now, this doesn't work as expected on def_functions,
# since we convert the numpy arrays to tensors pre-tracing (which won't get
    # overridden by the default type).
@def_function.function
def func(im):
return test_ops.dtype_with_default_op(im)
function_dtype = func(im).numpy()
self.assertNotEqual(fastpath_dtype, function_dtype)
# Captures are OK, since they don't go through the conversion path.
@def_function.function
def func_captured():
return test_ops.dtype_with_default_op(im)
function_dtype = func_captured().numpy()
self.assertEqual(fastpath_dtype, function_dtype)
if __name__ == "__main__":
test.main()
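# Public-API sketch of the MatMul tape-write tests above (not part of the test
# suite): the gradient of sum(x @ x) at x = ones((2, 2)) is 4.0 everywhere,
# matching testFastpathExecute_TapeWrite.
#
#   x = constant_op.constant(1.0, shape=[2, 2])
#   with backprop.GradientTape() as tape:
#     tape.watch(x)
#     z = math_ops.matmul(x, x)
#   print(tape.gradient(z, x).numpy())  # [[4. 4.] [4. 4.]]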
|
angular.module('adminGeoresourcesManagement').component('adminGeoresourcesManagement', {
templateUrl : "components/kommonitorAdmin/adminGeoresourcesManagement/admin-georesources-management.template.html",
controller : ['kommonitorDataExchangeService', 'kommonitorCacheHelperService', 'kommonitorDataGridHelperService', '$scope', '$timeout', '$rootScope', '__env', '$http',
function GeoresourcesManagementController(kommonitorDataExchangeService, kommonitorCacheHelperService, kommonitorDataGridHelperService, $scope, $timeout, $rootScope, __env, $http) {
this.kommonitorDataExchangeServiceInstance = kommonitorDataExchangeService;
// initialize any adminLTE box widgets
$('.box').boxWidget();
$scope.loadingData = true;
$scope.$on("initialMetadataLoadingCompleted", function (event) {
$timeout(function(){
$scope.initializeOrRefreshOverviewTable();
}, 250);
});
$scope.$on("initialMetadataLoadingFailed", function (event, errorArray) {
$timeout(function(){
$scope.loadingData = false;
});
});
$scope.initializeOrRefreshOverviewTable = function(){
$scope.loadingData = true;
kommonitorDataGridHelperService.buildDataGrid_georesources(kommonitorDataExchangeService.availableGeoresources);
$timeout(function(){
$scope.loadingData = false;
});
};
$scope.$on("refreshGeoresourceOverviewTable", function (event, crudType, targetGeoresourceId) {
$scope.loadingData = true;
$scope.refreshGeoresourceOverviewTable(crudType, targetGeoresourceId);
});
$scope.refreshGeoresourceOverviewTable = function(crudType, targetGeoresourceId){
if(! crudType || !targetGeoresourceId){
            // refetch all georesource metadata to update the table
kommonitorDataExchangeService.fetchGeoresourcesMetadata().then(function successCallback(response) {
$scope.initializeOrRefreshOverviewTable();
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
$timeout(function(){
$scope.loadingData = false;
});
}, function errorCallback(response) {
$timeout(function(){
$scope.loadingData = false;
});
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
});
}
else if(crudType && targetGeoresourceId){
if(crudType == "add"){
kommonitorCacheHelperService.fetchSingleGeoresourceMetadata(targetGeoresourceId).then(function successCallback(data) {
kommonitorDataExchangeService.addSingleGeoresourceMetadata(data);
$scope.initializeOrRefreshOverviewTable();
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
$timeout(function(){
$scope.loadingData = false;
});
}, function errorCallback(response) {
$timeout(function(){
$scope.loadingData = false;
});
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
});
}
else if(crudType == "edit"){
kommonitorCacheHelperService.fetchSingleGeoresourceMetadata(targetGeoresourceId).then(function successCallback(data) {
kommonitorDataExchangeService.replaceSingleGeoresourceMetadata(data);
$scope.initializeOrRefreshOverviewTable();
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
$timeout(function(){
$scope.loadingData = false;
});
}, function errorCallback(response) {
$timeout(function(){
$scope.loadingData = false;
});
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
});
}
else if(crudType == "delete"){
          // targetGeoresourceId might be an array in this case
if(targetGeoresourceId && typeof targetGeoresourceId == "string"){
kommonitorDataExchangeService.deleteSingleGeoresourceMetadata(targetGeoresourceId);
$scope.initializeOrRefreshOverviewTable();
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
$timeout(function(){
$scope.loadingData = false;
});
}
else if (targetGeoresourceId && Array.isArray(targetGeoresourceId)){
for (const id of targetGeoresourceId) {
kommonitorDataExchangeService.deleteSingleGeoresourceMetadata(id);
}
$scope.initializeOrRefreshOverviewTable();
$rootScope.$broadcast("refreshGeoresourceOverviewTableCompleted");
$timeout(function(){
$scope.loadingData = false;
});
}
}
}
};
$scope.onClickDeleteDatasets = function(){
$scope.loadingData = true;
var markedEntriesForDeletion = kommonitorDataGridHelperService.getSelectedGeoresourcesMetadata();
      // submit selected georesources to the modal controller
$rootScope.$broadcast("onDeleteGeoresources", markedEntriesForDeletion);
$timeout(function(){
$scope.loadingData = false;
});
};
$scope.onClickEditMetadata = function(georesourceDataset){
      // submit selected georesource to the modal controller
$rootScope.$broadcast("onEditGeoresourceMetadata", georesourceDataset);
};
$scope.onClickEditFeatures = function(georesourceDataset){
      // submit selected georesource to the modal controller
$rootScope.$broadcast("onEditGeoresourceFeatures", georesourceDataset);
};
}
]});
|
define(['text!packages/tpl/list.html','app','swal'],
function (template,app,swal) {
'use strict';
return Backbone.View.extend({
tagName:'tr',
events:{
"click .delete-token":"deleteToken",
"click .edit-token":"updateToken"
},
initialize: function () {
this.template = _.template(template);
this.listenTo(this.model, 'change', this.render);
this.listenTo(this.model, 'destroy', this.remove);
this.setting = this.options.setting;
this.render();
},
render: function () {
            this.$el.html(this.template(this.model.toJSON()));
            return this; // Backbone convention: return this so calls can be chained
        },
getDate:function(date){
return date.slice(0,4) + '-' + date.slice(4,6) +'-'+ date.slice(6,8);
},
getWhen:function( ){
            return this.getDate(this.model.get('dayid')) + ' Time: ' + this.model.get('timestart') + ' - ' + this.model.get('timeend');
},
deleteToken:function(ev){
var that = this;
var id = $(ev.target).data('id');
var URL = "api/deletejobtypes";
swal({
title: "Are you sure?",
text: "You will not be able to recover this record!",
type: "error",
showCancelButton: true,
confirmButtonClass: 'btn-danger',
confirmButtonText: 'Yes, Delete!'
},
function(isConfirm) {
if (isConfirm) {
$.get(URL, {id:id})
.done(function(data) {
var _json = jQuery.parseJSON(data);
if (typeof _json.error == "undefined") {
that.model.destroy({
success: function() {
swal("Deleted!", "Job type has been deleted.", "success");
}
});
}
else {
                        swal("Error", "There was a problem while deleting.", "error");
}
});
} else {
}
});
},
updateToken:function(ev){
var that = this;
require(['views/pricing'],function(pricing){
$('#pricing').html(new pricing({app:that.app}).$el);
$('#mdlpricing').modal('show');
})
},
save:function(title,comments,branchid,view){
this.model.set('branchid',branchid);
this.model.set('name',title);
this.model.set('comments',comments);
this.model.save();
view.closeView();
this.setting.successMessage();
}
});
});
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.symmetry.analyzer import (
PointGroupAnalyzer,
SpacegroupAnalyzer,
cluster_sites,
iterative_symmetrize,
)
from pymatgen.util.testing import PymatgenTest
test_dir_mol = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")
class SpacegroupAnalyzerTest(PymatgenTest):
def setUp(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
self.structure = p.structure
self.sg = SpacegroupAnalyzer(self.structure, 0.001)
self.disordered_structure = self.get_structure("Li10GeP2S12")
self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001)
s = p.structure.copy()
site = s[0]
del s[0]
s.append(site.species, site.frac_coords)
self.sg3 = SpacegroupAnalyzer(s, 0.001)
graphite = self.get_structure("Graphite")
graphite.add_site_property("magmom", [0.1] * len(graphite))
self.sg4 = SpacegroupAnalyzer(graphite, 0.001)
self.structure4 = graphite
def test_primitive(self):
s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
a = SpacegroupAnalyzer(s)
self.assertEqual(len(s), 4)
self.assertEqual(len(a.find_primitive()), 1)
def test_is_laue(self):
s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
a = SpacegroupAnalyzer(s)
self.assertTrue(a.is_laue())
def test_magnetic(self):
lfp = PymatgenTest.get_structure("LiFePO4")
sg = SpacegroupAnalyzer(lfp, 0.1)
self.assertEqual(sg.get_space_group_symbol(), "Pnma")
magmoms = [0] * len(lfp)
magmoms[4] = 1
magmoms[5] = -1
magmoms[6] = 1
magmoms[7] = -1
lfp.add_site_property("magmom", magmoms)
sg = SpacegroupAnalyzer(lfp, 0.1)
self.assertEqual(sg.get_space_group_symbol(), "Pnma")
def test_get_space_symbol(self):
self.assertEqual(self.sg.get_space_group_symbol(), "Pnma")
self.assertEqual(self.disordered_sg.get_space_group_symbol(), "P4_2/nmc")
self.assertEqual(self.sg3.get_space_group_symbol(), "Pnma")
self.assertEqual(self.sg4.get_space_group_symbol(), "P6_3/mmc")
def test_get_space_number(self):
self.assertEqual(self.sg.get_space_group_number(), 62)
self.assertEqual(self.disordered_sg.get_space_group_number(), 137)
self.assertEqual(self.sg4.get_space_group_number(), 194)
def test_get_hall(self):
self.assertEqual(self.sg.get_hall(), "-P 2ac 2n")
self.assertEqual(self.disordered_sg.get_hall(), "P 4n 2n -1n")
def test_get_pointgroup(self):
self.assertEqual(self.sg.get_point_group_symbol(), "mmm")
self.assertEqual(self.disordered_sg.get_point_group_symbol(), "4/mmm")
def test_get_symmetry_operations(self):
for sg, structure in [(self.sg, self.structure), (self.sg4, self.structure4)]:
pgops = sg.get_point_group_operations()
fracsymmops = sg.get_symmetry_operations()
symmops = sg.get_symmetry_operations(True)
latt = structure.lattice
for fop, op, pgop in zip(fracsymmops, symmops, pgops):
# translation vector values should all be 0 or 0.5
t = fop.translation_vector * 2
self.assertArrayAlmostEqual(t - np.round(t), 0)
self.assertArrayAlmostEqual(fop.rotation_matrix, pgop.rotation_matrix)
for site in structure:
newfrac = fop.operate(site.frac_coords)
newcart = op.operate(site.coords)
self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))
found = False
newsite = PeriodicSite(site.species, newcart, latt, coords_are_cartesian=True)
for testsite in structure:
if newsite.is_periodic_image(testsite, 1e-3):
found = True
break
self.assertTrue(found)
# Make sure this works for any position, not just the atomic
# ones.
random_fcoord = np.random.uniform(size=(3))
random_ccoord = latt.get_cartesian_coords(random_fcoord)
newfrac = fop.operate(random_fcoord)
newcart = op.operate(random_ccoord)
self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac))
def test_get_symmetry_dataset(self):
ds = self.sg.get_symmetry_dataset()
self.assertEqual(ds["international"], "Pnma")
def test_get_crystal_system(self):
crystal_system = self.sg.get_crystal_system()
self.assertEqual("orthorhombic", crystal_system)
self.assertEqual("tetragonal", self.disordered_sg.get_crystal_system())
orig_spg = self.sg._space_group_data["number"]
self.sg._space_group_data["number"] = 0
try:
crystal_system = self.sg.get_crystal_system()
except ValueError as exc:
self.assertEqual(str(exc), "Received invalid space group 0")
finally:
self.sg._space_group_data["number"] = orig_spg
def test_get_refined_structure(self):
for a in self.sg.get_refined_structure().lattice.angles:
self.assertEqual(a, 90)
refined = self.disordered_sg.get_refined_structure()
for a in refined.lattice.angles:
self.assertEqual(a, 90)
self.assertEqual(refined.lattice.a, refined.lattice.b)
structure = self.get_structure("Li2O")
structure.add_site_property("magmom", [1.0] * len(structure))
sg = SpacegroupAnalyzer(structure, 0.01)
refined_struct = sg.get_refined_structure(keep_site_properties=True)
self.assertEqual(refined_struct.site_properties["magmom"], [1.0] * len(refined_struct))
structure = self.get_structure("Li2O")
structure.add_site_property("magmom", [1.0] * len(structure))
sg = SpacegroupAnalyzer(structure, 0.01)
refined_struct = sg.get_refined_structure(keep_site_properties=False)
self.assertEqual(refined_struct.site_properties.get("magmom", None), None)
def test_get_symmetrized_structure(self):
symm_struct = self.sg.get_symmetrized_structure()
for a in symm_struct.lattice.angles:
self.assertEqual(a, 90)
self.assertEqual(len(symm_struct.equivalent_sites), 5)
symm_struct = self.disordered_sg.get_symmetrized_structure()
self.assertEqual(len(symm_struct.equivalent_sites), 8)
self.assertEqual([len(i) for i in symm_struct.equivalent_sites], [16, 4, 8, 4, 2, 8, 8, 8])
s1 = symm_struct.equivalent_sites[1][1]
s2 = symm_struct[symm_struct.equivalent_indices[1][1]]
self.assertEqual(s1, s2)
self.assertEqual(self.sg4.get_symmetrized_structure()[0].magmom, 0.1)
self.assertEqual(symm_struct.wyckoff_symbols[0], "16h")
# Check copying
self.assertEqual(symm_struct.copy(), symm_struct)
d = symm_struct.as_dict()
from pymatgen.symmetry.structure import SymmetrizedStructure
ss = SymmetrizedStructure.from_dict(d)
self.assertEqual(ss.wyckoff_symbols[0], "16h")
self.assertIn("SymmetrizedStructure", ss.__str__())
def test_find_primitive(self):
"""
F m -3 m Li2O testing of converting to primitive cell
"""
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li2O.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure)
primitive_structure = s.find_primitive()
self.assertEqual(primitive_structure.formula, "Li2 O1")
self.assertTrue(primitive_structure.site_properties.get("magmom", None) is None)
# This isn't what is expected. All the angles should be 60
self.assertAlmostEqual(primitive_structure.lattice.alpha, 60)
self.assertAlmostEqual(primitive_structure.lattice.beta, 60)
self.assertAlmostEqual(primitive_structure.lattice.gamma, 60)
self.assertAlmostEqual(primitive_structure.lattice.volume, structure.lattice.volume / 4.0)
structure = parser.get_structures(False)[0]
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure)
primitive_structure = s.find_primitive(keep_site_properties=True)
self.assertEqual(primitive_structure.site_properties["magmom"], [1.0] * len(primitive_structure))
structure = parser.get_structures(False)[0]
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure)
primitive_structure = s.find_primitive(keep_site_properties=False)
self.assertEqual(primitive_structure.site_properties.get("magmom", None), None)
def test_get_ir_reciprocal_mesh(self):
grid = self.sg.get_ir_reciprocal_mesh()
self.assertEqual(len(grid), 216)
self.assertAlmostEqual(grid[1][0][0], 0.1)
self.assertAlmostEqual(grid[1][0][1], 0.0)
self.assertAlmostEqual(grid[1][0][2], 0.0)
self.assertAlmostEqual(grid[1][1], 2)
def test_get_conventional_standard_structure(self):
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "bcc_1927.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 9.1980270633769461)
self.assertAlmostEqual(conv.lattice.b, 9.1980270633769461)
self.assertAlmostEqual(conv.lattice.c, 9.1980270633769461)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "btet_1915.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 5.0615106678044235)
self.assertAlmostEqual(conv.lattice.b, 5.0615106678044235)
self.assertAlmostEqual(conv.lattice.c, 4.2327080177761687)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orci_1010.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 2.9542233922299999)
self.assertAlmostEqual(conv.lattice.b, 4.6330325651443296)
self.assertAlmostEqual(conv.lattice.c, 5.373703587040775)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orcc_1003.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 4.1430033493799998)
self.assertAlmostEqual(conv.lattice.b, 31.437979757624728)
self.assertAlmostEqual(conv.lattice.c, 3.99648651)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orac_632475.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 3.1790663399999999)
self.assertAlmostEqual(conv.lattice.b, 9.9032878699999998)
self.assertAlmostEqual(conv.lattice.c, 3.5372412099999999)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "monoc_1028.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 117.53832420192903)
self.assertAlmostEqual(conv.lattice.gamma, 90)
self.assertAlmostEqual(conv.lattice.a, 14.033435583000625)
self.assertAlmostEqual(conv.lattice.b, 3.96052850731)
self.assertAlmostEqual(conv.lattice.c, 6.8743926325200002)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "hex_1170.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 90)
self.assertAlmostEqual(conv.lattice.beta, 90)
self.assertAlmostEqual(conv.lattice.gamma, 120)
self.assertAlmostEqual(conv.lattice.a, 3.699919902005897)
self.assertAlmostEqual(conv.lattice.b, 3.699919902005897)
self.assertAlmostEqual(conv.lattice.c, 6.9779585500000003)
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "tric_684654.json"))
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure()
self.assertAlmostEqual(conv.lattice.alpha, 74.09581916308757)
self.assertAlmostEqual(conv.lattice.beta, 75.72817279281173)
self.assertAlmostEqual(conv.lattice.gamma, 63.63234318667333)
self.assertAlmostEqual(conv.lattice.a, 3.741372924048738)
self.assertAlmostEqual(conv.lattice.b, 3.9883228679270686)
self.assertAlmostEqual(conv.lattice.c, 7.288495840048958)
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "tric_684654.json"))
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure(keep_site_properties=True)
self.assertEqual(conv.site_properties["magmom"], [1.0] * len(conv))
structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "tric_684654.json"))
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure, symprec=1e-2)
conv = s.get_conventional_standard_structure(keep_site_properties=False)
self.assertEqual(conv.site_properties.get("magmom", None), None)
def test_get_primitive_standard_structure(self):
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "bcc_1927.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 109.47122063400001)
self.assertAlmostEqual(prim.lattice.beta, 109.47122063400001)
self.assertAlmostEqual(prim.lattice.gamma, 109.47122063400001)
self.assertAlmostEqual(prim.lattice.a, 7.9657251015812145)
self.assertAlmostEqual(prim.lattice.b, 7.9657251015812145)
self.assertAlmostEqual(prim.lattice.c, 7.9657251015812145)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "btet_1915.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 105.015053349)
self.assertAlmostEqual(prim.lattice.beta, 105.015053349)
self.assertAlmostEqual(prim.lattice.gamma, 118.80658411899999)
self.assertAlmostEqual(prim.lattice.a, 4.1579321075608791)
self.assertAlmostEqual(prim.lattice.b, 4.1579321075608791)
self.assertAlmostEqual(prim.lattice.c, 4.1579321075608791)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orci_1010.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 134.78923546600001)
self.assertAlmostEqual(prim.lattice.beta, 105.856239333)
self.assertAlmostEqual(prim.lattice.gamma, 91.276341676000001)
self.assertAlmostEqual(prim.lattice.a, 3.8428217771014852)
self.assertAlmostEqual(prim.lattice.b, 3.8428217771014852)
self.assertAlmostEqual(prim.lattice.c, 3.8428217771014852)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orcc_1003.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 90)
self.assertAlmostEqual(prim.lattice.beta, 90)
self.assertAlmostEqual(prim.lattice.gamma, 164.985257335)
self.assertAlmostEqual(prim.lattice.a, 15.854897098324196)
self.assertAlmostEqual(prim.lattice.b, 15.854897098324196)
self.assertAlmostEqual(prim.lattice.c, 3.99648651)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orac_632475.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 90)
self.assertAlmostEqual(prim.lattice.beta, 90)
self.assertAlmostEqual(prim.lattice.gamma, 144.40557588533386)
self.assertAlmostEqual(prim.lattice.a, 5.2005185662155391)
self.assertAlmostEqual(prim.lattice.b, 5.2005185662155391)
self.assertAlmostEqual(prim.lattice.c, 3.5372412099999999)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "monoc_1028.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 63.579155761999999)
self.assertAlmostEqual(prim.lattice.beta, 116.42084423747779)
self.assertAlmostEqual(prim.lattice.gamma, 148.47965136208569)
self.assertAlmostEqual(prim.lattice.a, 7.2908007159612325)
self.assertAlmostEqual(prim.lattice.b, 7.2908007159612325)
self.assertAlmostEqual(prim.lattice.c, 6.8743926325200002)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "hex_1170.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 90)
self.assertAlmostEqual(prim.lattice.beta, 90)
self.assertAlmostEqual(prim.lattice.gamma, 120)
self.assertAlmostEqual(prim.lattice.a, 3.699919902005897)
self.assertAlmostEqual(prim.lattice.b, 3.699919902005897)
self.assertAlmostEqual(prim.lattice.c, 6.9779585500000003)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "rhomb_3478_conv.cif"))
structure = parser.get_structures(False)[0]
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure()
self.assertAlmostEqual(prim.lattice.alpha, 28.049186140546812)
self.assertAlmostEqual(prim.lattice.beta, 28.049186140546812)
self.assertAlmostEqual(prim.lattice.gamma, 28.049186140546812)
self.assertAlmostEqual(prim.lattice.a, 5.9352627428399982)
self.assertAlmostEqual(prim.lattice.b, 5.9352627428399982)
self.assertAlmostEqual(prim.lattice.c, 5.9352627428399982)
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "rhomb_3478_conv.cif"))
structure = parser.get_structures(False)[0]
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure(keep_site_properties=True)
self.assertEqual(prim.site_properties["magmom"], [1.0] * len(prim))
parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "rhomb_3478_conv.cif"))
structure = parser.get_structures(False)[0]
structure.add_site_property("magmom", [1.0] * len(structure))
s = SpacegroupAnalyzer(structure, symprec=1e-2)
prim = s.get_primitive_standard_structure(keep_site_properties=False)
self.assertEqual(prim.site_properties.get("magmom", None), None)
def test_tricky_structure(self):
        # for some reason this structure kills spglib 1.9
        # 1.7 can't find symmetry either, but at least doesn't kill python
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.tricky_symmetry"))
sa = SpacegroupAnalyzer(s, 0.1)
sa.get_space_group_symbol()
sa.get_space_group_number()
sa.get_point_group_symbol()
sa.get_crystal_system()
sa.get_hall()
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
self.structure = p.structure
self.sg1 = SpacegroupAnalyzer(self.structure, 0.001).get_space_group_operations()
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0, 1]]
sites2 = [self.structure[i] for i in [2, 3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0, 1]]
sites2 = [self.structure[i] for i in [0, 2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
H2O2 = Molecule(
["O", "O", "H", "H"],
[
[0, 0.727403, -0.050147],
[0, -0.727403, -0.050147],
[0.83459, 0.897642, 0.401175],
[-0.83459, -0.897642, 0.401175],
],
)
C2H2F2Br2 = Molecule(
["C", "C", "F", "Br", "H", "F", "H", "Br"],
[
[-0.752000, 0.001000, -0.141000],
[0.752000, -0.001000, 0.141000],
[-1.158000, 0.991000, 0.070000],
[-1.240000, -0.737000, 0.496000],
[-0.924000, -0.249000, -1.188000],
[1.158000, -0.991000, -0.070000],
[0.924000, 0.249000, 1.188000],
[1.240000, 0.737000, -0.496000],
],
)
H2O = Molecule(
["H", "O", "H"],
[[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]],
)
C2H4 = Molecule(
["C", "C", "H", "H", "H", "H"],
[
[0.0000, 0.0000, 0.6695],
[0.0000, 0.0000, -0.6695],
[0.0000, 0.9289, 1.2321],
[0.0000, -0.9289, 1.2321],
[0.0000, 0.9289, -1.2321],
[0.0000, -0.9289, -1.2321],
],
)
NH3 = Molecule(
["N", "H", "H", "H"],
[
[0.0000, 0.0000, 0.0000],
[0.0000, -0.9377, -0.3816],
[0.8121, 0.4689, -0.3816],
[-0.8121, 0.4689, -0.3816],
],
)
BF3 = Molecule(
["B", "F", "F", "F"],
[
[0.0000, 0.0000, 0.0000],
[0.0000, -0.9377, 0.00],
[0.8121, 0.4689, 0],
[-0.8121, 0.4689, 0],
],
)
CH4 = Molecule(
["C", "H", "H", "H", "H"],
[
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.08],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
],
)
PF6 = Molecule(
["P", "F", "F", "F", "F", "F", "F"],
[[0, 0, 0], [0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], [-1, 0, 0]],
)
class PointGroupAnalyzerTest(PymatgenTest):
def test_spherical(self):
a = PointGroupAnalyzer(CH4)
self.assertEqual(a.sch_symbol, "Td")
self.assertEqual(len(a.get_pointgroup()), 24)
self.assertEqual(a.get_rotational_symmetry_number(), 12)
a = PointGroupAnalyzer(H2O)
self.assertEqual(a.get_rotational_symmetry_number(), 2)
a = PointGroupAnalyzer(PF6)
self.assertEqual(a.sch_symbol, "Oh")
self.assertEqual(len(a.get_pointgroup()), 48)
m = Molecule.from_file(os.path.join(test_dir_mol, "c60.xyz"))
a = PointGroupAnalyzer(m)
self.assertEqual(a.sch_symbol, "Ih")
cube_species = ["C", "C", "C", "C", "C", "C", "C", "C"]
cube_coords = [
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1],
]
m = Molecule(cube_species, cube_coords)
a = PointGroupAnalyzer(m, 0.1)
self.assertEqual(a.sch_symbol, "Oh")
def test_tricky(self):
m = Molecule.from_file(os.path.join(test_dir_mol, "dh.xyz"))
a = PointGroupAnalyzer(m, 0.1)
self.assertEqual(a.sch_symbol, "D*h")
def test_linear(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.08],
[0, 0.000000, -1.08],
]
mol = Molecule(["C", "H", "H"], coords)
a = PointGroupAnalyzer(mol)
self.assertEqual(a.sch_symbol, "D*h")
mol = Molecule(["C", "H", "N"], coords)
a = PointGroupAnalyzer(mol)
self.assertEqual(a.sch_symbol, "C*v")
def test_asym_top(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.08],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
mol = Molecule(["C", "H", "F", "Br", "Cl"], coords)
a = PointGroupAnalyzer(mol)
self.assertEqual(a.sch_symbol, "C1")
self.assertEqual(len(a.get_pointgroup()), 1)
coords = [
[0.000000, 0.000000, 1.08],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
cs_mol = Molecule(["H", "F", "Cl", "Cl"], coords)
a = PointGroupAnalyzer(cs_mol)
self.assertEqual(a.sch_symbol, "Cs")
self.assertEqual(len(a.get_pointgroup()), 2)
a = PointGroupAnalyzer(C2H2F2Br2)
self.assertEqual(a.sch_symbol, "Ci")
self.assertEqual(len(a.get_pointgroup()), 2)
def test_cyclic(self):
a = PointGroupAnalyzer(H2O2)
self.assertEqual(a.sch_symbol, "C2")
self.assertEqual(len(a.get_pointgroup()), 2)
a = PointGroupAnalyzer(H2O)
self.assertEqual(a.sch_symbol, "C2v")
self.assertEqual(len(a.get_pointgroup()), 4)
a = PointGroupAnalyzer(NH3)
self.assertEqual(a.sch_symbol, "C3v")
self.assertEqual(len(a.get_pointgroup()), 6)
cs2 = Molecule.from_file(os.path.join(test_dir_mol, "Carbon_Disulfide.xyz"))
a = PointGroupAnalyzer(cs2, eigen_tolerance=0.001)
self.assertEqual(a.sch_symbol, "C2v")
def test_dihedral(self):
a = PointGroupAnalyzer(C2H4)
self.assertEqual(a.sch_symbol, "D2h")
self.assertEqual(len(a.get_pointgroup()), 8)
a = PointGroupAnalyzer(BF3)
self.assertEqual(a.sch_symbol, "D3h")
self.assertEqual(len(a.get_pointgroup()), 12)
m = Molecule.from_file(os.path.join(test_dir_mol, "b12h12.xyz"))
a = PointGroupAnalyzer(m)
self.assertEqual(a.sch_symbol, "Ih")
def test_symmetrize_molecule1(self):
np.random.seed(77)
distortion = np.random.randn(len(C2H4), 3) / 10
dist_mol = Molecule(C2H4.species, C2H4.cart_coords + distortion)
eq = iterative_symmetrize(dist_mol, max_n=100, epsilon=1e-7)
sym_mol, eq_sets, ops = eq["sym_mol"], eq["eq_sets"], eq["sym_ops"]
self.assertTrue({0, 1} in eq_sets.values())
self.assertTrue({2, 3, 4, 5} in eq_sets.values())
coords = sym_mol.cart_coords
for i, eq_set in eq_sets.items():
for j in eq_set:
_ = np.dot(ops[i][j], coords[i])
self.assertTrue(np.allclose(np.dot(ops[i][j], coords[i]), coords[j]))
def test_symmetrize_molecule2(self):
np.random.seed(77)
distortion = np.random.randn(len(C2H2F2Br2), 3) / 20
dist_mol = Molecule(C2H2F2Br2.species, C2H2F2Br2.cart_coords + distortion)
PA1 = PointGroupAnalyzer(C2H2F2Br2, tolerance=0.1)
self.assertTrue(PA1.get_pointgroup().sch_symbol == "Ci")
PA2 = PointGroupAnalyzer(dist_mol, tolerance=0.1)
self.assertTrue(PA2.get_pointgroup().sch_symbol == "C1")
eq = iterative_symmetrize(dist_mol, tolerance=0.3)
PA3 = PointGroupAnalyzer(eq["sym_mol"], tolerance=0.1)
self.assertTrue(PA3.get_pointgroup().sch_symbol == "Ci")
def test_get_kpoint_weights(self):
for name in ["SrTiO3", "LiFePO4", "Graphite"]:
s = PymatgenTest.get_structure(name)
a = SpacegroupAnalyzer(s)
ir_mesh = a.get_ir_reciprocal_mesh((4, 4, 4))
weights = [i[1] for i in ir_mesh]
weights = np.array(weights) / sum(weights)
for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])):
self.assertAlmostEqual(i, w)
for name in ["SrTiO3", "LiFePO4", "Graphite"]:
s = PymatgenTest.get_structure(name)
a = SpacegroupAnalyzer(s)
ir_mesh = a.get_ir_reciprocal_mesh((1, 2, 3))
weights = [i[1] for i in ir_mesh]
weights = np.array(weights) / sum(weights)
for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])):
self.assertAlmostEqual(i, w)
v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun.xml"))
a = SpacegroupAnalyzer(v.final_structure)
wts = a.get_kpoint_weights(v.actual_kpoints)
for w1, w2 in zip(v.actual_kpoints_weights, wts):
self.assertAlmostEqual(w1, w2)
kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]]
self.assertRaises(ValueError, a.get_kpoint_weights, kpts)
class FuncTest(unittest.TestCase):
def test_cluster_sites(self):
o, c = cluster_sites(CH4, 0.1)
self.assertEqual(o.specie.symbol, "C")
self.assertEqual(len(c), 1)
o, c = cluster_sites(C2H2F2Br2.get_centered_molecule(), 0.1)
self.assertIsNone(o)
self.assertEqual(len(c), 4)
if __name__ == "__main__":
unittest.main()
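# Standalone orientation sketch for the API under test (mirrors test_primitive above):
#
#   s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
#   sga = SpacegroupAnalyzer(s)
#   sga.get_space_group_symbol()   # 'Fm-3m'
#   sga.get_space_group_number()   # 225
#   len(sga.find_primitive())      # 1 -- fcc Cu reduces to a single-atom primitive cell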
|
"""
Django settings for url_shortener project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import django_heroku
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$vjqib-&h*v3$9s!)o*jw^@cupy*9&zh@905(krhp!(o4f1&wv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["dikhe.ga","www.dikhe.ga"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'url_shortener.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'url_shortener.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
#STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
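# Hypothetical view-side usage of the mapping above (the message text is illustrative):
#
#   from django.contrib import messages
#   messages.success(request, "Short URL created.")  # rendered with CSS class "alert-success"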
# Activate Django-Heroku.
django_heroku.settings(locals()) |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
    url(r'^add/$', views.add_entry, name='add_entry'),
    url(r'^detail/(?P<case_id>[0-9]+)$', views.detail, name='detail_entry'),
    url(r'^advancedSearch/$', views.advanced_search, name='advanced_search'),
    url(r'^office/(?P<district_id>[0-9]+)/$', views.district_detail, name='district_detail'),
]
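# Hypothetical sketch of the view signatures these patterns expect; the real
# implementations live in views.py and are not shown here:
#
#   def homepage(request): ...
#   def add_entry(request): ...
#   def detail(request, case_id): ...
#   def advanced_search(request): ...
#   def district_detail(request, district_id): ...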
|
import os
import yaml
import rosbag
import decimal
def check_topic_bag(topic, bags):
if topic:
for bag in bags:
assert topic in bag.get_type_and_topic_info().topics, (
"topic: {} is not recorded in {}".format(topic, bag._filename))
return True
return False
def load_bags(path, multi):
list_bags = []
if multi:
for bag in os.listdir(path):
f_bag = os.path.join(path, bag)
list_bags.append(f_bag)
else:
list_bags.append(path)
bags = []
for path_bag in list_bags:
_, bag_name = os.path.split(path_bag)
print("Loading: {}".format(bag_name))
bags.append(rosbag.Bag(path_bag))
print()
return bags
def create_folder(path_save, folder_name):
path_folder = os.path.join(path_save, folder_name)
os.makedirs(path_folder, exist_ok=True)
print(">> Saving folder: {}\n".format(path_folder))
return path_folder
def get_path_save(path_bags, path_save, multi):
if path_save == "":
bag_path = path_bags[:-1] if path_bags[-1] == "/" else path_bags
path_save, _ = os.path.split(bag_path)
# If the path direct to a specific bag file, then we need to
# backtrace of two path back.
# Ex: .../smth/bag/bag-name.bag -> .../smth/
if not multi:
path_save, _ = os.path.split(path_save)
if not os.path.exists(path_save):
os.makedirs(path_save, exist_ok=True)
return path_save
def msg_to_timestamp(msg):
# Trick to trim the message and parse the yaml
# data more efficiently.
msg = "\n".join(str(msg)[:200].split("\n")[:5])
# Extract seconds and nanoseconds
msg_dict = yaml.load(str(msg), Loader=yaml.FullLoader)
stamp = msg_dict["header"]["stamp"]
sec = float(str(stamp["secs"]))
nsec = float(str(stamp["nsecs"]))
# Compose time in seconds
nsec_to_sec = nsec * 1e-9
time = decimal.Decimal(sec + nsec_to_sec)
# Convert time in nanoseconds
to_nsec = decimal.Decimal(1e+9)
timestamp = int(time * to_nsec)
return timestamp
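if __name__ == "__main__":
    # Hedged usage sketch tying the helpers together; the bag directory and topic
    # name below are placeholders, not values from the original project.
    bags = load_bags("/path/to/bag_folder", multi=True)
    if check_topic_bag("/camera/image_raw", bags):
        for bag in bags:
            for _topic, msg, _t in bag.read_messages(topics=["/camera/image_raw"]):
                print(msg_to_timestamp(msg))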
|
# -*- coding: utf-8 -*-
# @Time : 2020-05-20 10:25
# @Author : Shupeng
import copy
import torch.nn as nn
def clone(module, N):
"Produce N identical layers"
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
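if __name__ == "__main__":
    # Usage sketch (illustrative layer, not from the original codebase): clone a
    # 512x512 linear layer six times, as in a Transformer encoder stack.
    layers = clone(nn.Linear(512, 512), 6)
    print(len(layers))              # 6
    print(layers[0] is layers[1])   # False -- deep copies with independent parameters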
|