Dataset schema: text (string, lengths 4 to 1.02M characters); meta (dict).
from collections import defaultdict
from threading import local

from django.core.cache import caches
from django.db import DEFAULT_DB_ALIAS

from .settings import cachalot_settings
from .signals import post_invalidation
from .transaction import AtomicCache


class CacheHandler(local):
    @property
    def atomic_caches(self):
        if not hasattr(self, '_atomic_caches'):
            self._atomic_caches = defaultdict(list)
        return self._atomic_caches

    def get_atomic_cache(self, cache_alias, db_alias, level):
        if cache_alias not in self.atomic_caches[db_alias][level]:
            self.atomic_caches[db_alias][level][cache_alias] = AtomicCache(
                self.get_cache(cache_alias, db_alias, level-1), db_alias)
        return self.atomic_caches[db_alias][level][cache_alias]

    def get_cache(self, cache_alias=None, db_alias=None, atomic_level=-1):
        if db_alias is None:
            db_alias = DEFAULT_DB_ALIAS
        if cache_alias is None:
            cache_alias = cachalot_settings.CACHALOT_CACHE

        min_level = -len(self.atomic_caches[db_alias])
        if atomic_level < min_level:
            return caches[cache_alias]
        return self.get_atomic_cache(cache_alias, db_alias, atomic_level)

    def enter_atomic(self, db_alias):
        if db_alias is None:
            db_alias = DEFAULT_DB_ALIAS
        self.atomic_caches[db_alias].append({})

    def exit_atomic(self, db_alias, commit):
        if db_alias is None:
            db_alias = DEFAULT_DB_ALIAS
        atomic_caches = self.atomic_caches[db_alias].pop().values()
        if commit:
            to_be_invalidated = set()
            for atomic_cache in atomic_caches:
                atomic_cache.commit()
                to_be_invalidated.update(atomic_cache.to_be_invalidated)
            # This happens when committing the outermost atomic block.
            if not self.atomic_caches[db_alias]:
                for table in to_be_invalidated:
                    post_invalidation.send(table, db_alias=db_alias)


cachalot_caches = CacheHandler()
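
# --- Illustrative usage sketch (not part of cachalot/cache.py) ---------------
# The thread-local handler above stacks one AtomicCache layer per nested
# atomic block and only broadcasts post_invalidation once the outermost block
# commits. The helper below is a minimal sketch of how those hooks fit
# together; the function name and the "default" alias are illustrative, and in
# the real package enter_atomic/exit_atomic are driven by cachalot's
# transaction integration rather than called directly by user code.

from cachalot.cache import cachalot_caches


def simulate_atomic_block(db_alias='default', commit=True):
    # Entering a transaction pushes a fresh layer of AtomicCache wrappers
    # for this thread and this database alias.
    cachalot_caches.enter_atomic(db_alias)
    try:
        # Inside the block, get_cache() resolves to the innermost AtomicCache
        # layer instead of the plain Django cache backend.
        layered = cachalot_caches.get_cache(db_alias=db_alias)
        assert layered is not None
    finally:
        # Popping the layer: when the outermost block commits, the collected
        # table invalidations are broadcast through post_invalidation.
        cachalot_caches.exit_atomic(db_alias, commit)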
{ "content_hash": "11cbc107670bf6759eafd37dfb20b4ce", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 75, "avg_line_length": 37.089285714285715, "alnum_prop": 0.6398651901781416, "repo_name": "BertrandBordage/django-cachalot", "id": "6acde16114672a8acfc7b3880494de2c535698cb", "size": "2077", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cachalot/cache.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "640" }, { "name": "Python", "bytes": "204619" } ], "symlink_target": "" }
from molluscs.v1 import molluscclient


def sample_classify(video, location):
    # Create a client
    client = molluscclient.MolluscServiceClient()

    # Initialize request argument(s)
    classify_target = molluscclient.ClassifyTarget()

    # video = "path/to/mollusc/video.mkv"
    with open(video, "rb") as f:
        classify_target.video = f.read()

    # location = "New Zealand"
    classify_target.location_annotation = location

    request = molluscclient.molluscs.v1.ClassifyRequest(
        classify_target=classify_target,
    )

    # Make the request
    response = client.classify(request=request)

    # Handle the response
    print(f"Mollusc is a \"{response.taxonomy}\"")

# [END mollusc_classify_sync]
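
# --- Illustrative invocation (not part of the generated sample) --------------
# The sample reads the video from disk and sends a single classify request.
# A call to it might look like the following, with the path and location
# taken from the placeholder comments above.

if __name__ == "__main__":
    sample_classify("path/to/mollusc/video.mkv", "New Zealand")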
{ "content_hash": "ff8f00a23b03438bab6a577b1d91834a", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 56, "avg_line_length": 25.964285714285715, "alnum_prop": 0.6905089408528198, "repo_name": "googleapis/gapic-generator-python", "id": "d8f4cb53cbad3e77c9e7c08030bd829824f8e17b", "size": "2080", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "tests/unit/samplegen/golden_snippets/sample_basic.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2702" }, { "name": "Jinja", "bytes": "767902" }, { "name": "Python", "bytes": "4802905" }, { "name": "Shell", "bytes": "31013" }, { "name": "Starlark", "bytes": "26281" } ], "symlink_target": "" }
import os

import numpy as np
import matplotlib.pyplot as plt
import tbmodels as tb
import pymatgen as mg
import pymatgen.symmetry.analyzer
import pymatgen.symmetry.bandstructure
import symmetry_representation as sr


def compare_bands_plot(model1, model2, structure):
    path = mg.symmetry.bandstructure.HighSymmKpath(structure)
    kpts, labels = path.get_kpoints(line_density=200)
    # de-duplicate / merge labels
    for i in range(len(labels) - 1):
        if labels[i] and labels[i + 1]:
            if labels[i] != labels[i + 1]:
                labels[i] = labels[i] + " | " + labels[i + 1]
            labels[i + 1] = ""

    # E-fermi is just an approximation
    efermi = model1.eigenval([0, 0, 0])[model1.occ]
    E1 = [model1.eigenval(k) for k in kpts]
    E2 = [model2.eigenval(k) for k in kpts]

    plt.figure()
    labels_clean = []
    labels_idx = []
    for i, l in enumerate(labels):
        if l:
            labels_idx.append(i)
            labels_clean.append("$" + l + "$")
    for i in labels_idx[1:-1]:
        plt.axvline(i, color="b")
    plt.plot(range(len(kpts)), E1 - efermi, "k")
    plt.plot(range(len(kpts)), E2 - efermi, "r", lw=0.5)
    plt.xticks(labels_idx, labels_clean)
    plt.xlim([0, len(kpts) - 1])
    plt.ylim([-6, 6])
    plt.savefig("results/compare_bands_new.pdf", bbox_inches="tight")


if __name__ == "__main__":
    model_nosym = tb.Model.from_hdf5_file("data/model_nosym.hdf5")
    reference_model = tb.Model.from_hdf5_file("data/reference_model.hdf5")

    # change the order of the orbitals from (In: s, py, pz, px; As: py, pz, px) * 2
    # to (In: s, px, py, pz; As: px, py, pz) * 2
    model_nosym = model_nosym.slice_orbitals(
        [0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11]
    )

    pos_In = (0, 0, 0)
    pos_As = (0.25, 0.25, 0.25)

    spin_up = sr.Spin(total=0.5, z_component=0.5)
    spin_down = sr.Spin(total=0.5, z_component=-0.5)

    orbitals = [
        sr.Orbital(position=pos_In, function_string="1", spin=spin_up),
        sr.Orbital(position=pos_In, function_string="x", spin=spin_up),
        sr.Orbital(position=pos_In, function_string="y", spin=spin_up),
        sr.Orbital(position=pos_In, function_string="z", spin=spin_up),
        sr.Orbital(position=pos_As, function_string="x", spin=spin_up),
        sr.Orbital(position=pos_As, function_string="y", spin=spin_up),
        sr.Orbital(position=pos_As, function_string="z", spin=spin_up),
        sr.Orbital(position=pos_In, function_string="1", spin=spin_down),
        sr.Orbital(position=pos_In, function_string="x", spin=spin_down),
        sr.Orbital(position=pos_In, function_string="y", spin=spin_down),
        sr.Orbital(position=pos_In, function_string="z", spin=spin_down),
        sr.Orbital(position=pos_As, function_string="x", spin=spin_down),
        sr.Orbital(position=pos_As, function_string="y", spin=spin_down),
        sr.Orbital(position=pos_As, function_string="z", spin=spin_down),
    ]

    # set up symmetry operations
    time_reversal = sr.get_time_reversal(orbitals=orbitals, numeric=True)

    assert np.allclose(
        time_reversal.repr.matrix, np.kron([[0, -1j], [1j, 0]], np.eye(7))
    )

    structure = mg.Structure(
        lattice=model_nosym.uc,
        species=["In", "As"],
        coords=np.array([[0, 0, 0], [0.25, 0.25, 0.25]]),
    )

    # get real-space representations
    analyzer = mg.symmetry.analyzer.SpacegroupAnalyzer(structure)
    symops = analyzer.get_symmetry_operations(cartesian=False)
    symops_cart = analyzer.get_symmetry_operations(cartesian=True)

    symmetries = []
    for sym, sym_cart in zip(symops, symops_cart):
        symmetries.append(
            sr.SymmetryOperation.from_orbitals(
                orbitals=orbitals,
                real_space_operator=sr.RealSpaceOperator.from_pymatgen(sym),
                rotation_matrix_cartesian=sym_cart.rotation_matrix,
                numeric=True,
            )
        )

    os.makedirs("results", exist_ok=True)
    model_tr = model_nosym.symmetrize([time_reversal])
    model = model_tr.symmetrize(symmetries, full_group=True)
    model.to_hdf5_file("results/model_new.hdf5")

    compare_bands_plot(model_nosym, model, structure)

    for R in set(model.hop.keys()) | set(reference_model.hop.keys()):
        assert np.isclose(model.hop[R], reference_model.hop[R]).all()

    # Check that the symmetries are fulfilled at some random k
    k = (0.12312351, 0.73475412, 0.2451235)
    assert np.isclose(
        model.hamilton(k, convention=1),
        time_reversal.repr.matrix @
        # when complex conjugation is present, r-space matrix (R) and k-space matrix (K)
        # are related by K = -(R.T)^{-1}
        # -> K^{-1} = -R.T
        model.hamilton(-time_reversal.rotation_matrix.T @ k, convention=1).conjugate() @
        time_reversal.repr.matrix.conjugate().T,
    ).all()

    for sym in symmetries:
        assert np.isclose(
            model.hamilton(k, convention=1),
            sym.repr.matrix @
            # k-space and r-space matrices are related by transposing and inverting
            # -> k-matrix^{-1} == r-matrix.T
            model.hamilton(sym.rotation_matrix.T @ k, convention=1) @
            sym.repr.matrix.conjugate().T,
        ).all()
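
# --- Illustrative follow-up sketch (not part of symmetrize_new.py) -----------
# The script above writes the symmetrized model to results/model_new.hdf5.
# A separate session could reload it and sanity-check its band energies
# against the stored reference, e.g. at the Gamma point; the file paths are
# the ones used in the script and the Gamma-point check is illustrative.

import numpy as np
import tbmodels as tb

model = tb.Model.from_hdf5_file("results/model_new.hdf5")
reference = tb.Model.from_hdf5_file("data/reference_model.hdf5")
assert np.allclose(model.eigenval([0, 0, 0]), reference.eigenval([0, 0, 0]))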
{ "content_hash": "8f88fb6f552105aaa706e950d65b0ae6", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 88, "avg_line_length": 39.734848484848484, "alnum_prop": 0.6144899904671115, "repo_name": "Z2PackDev/TBmodels", "id": "8c905a7eeb8166ab5e0073b1c212012e57e9b731", "size": "5328", "binary": false, "copies": "1", "ref": "refs/heads/trunk", "path": "examples/symmetrization/symmorphic_InAs/symmetrize_new.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "206314" }, { "name": "Shell", "bytes": "803" } ], "symlink_target": "" }
""" A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from collections import defaultdict, namedtuple from collections.abc import Mapping import re import warnings import inspect import json import types from functools import partial, reduce import sys from itertools import zip_longest, chain from types import TracebackType from typing import ( Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Sequence, Tuple, Type, Union, cast, no_type_check, TYPE_CHECKING, ) import datetime import numpy as np import pandas as pd from pandas.api.types import ( # type: ignore[attr-defined] is_bool_dtype, is_list_like, is_dict_like, is_scalar, ) from pandas.tseries.frequencies import DateOffset, to_offset if TYPE_CHECKING: from pandas.io.formats.style import Styler from pandas.core.dtypes.common import infer_dtype_from_object from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.inference import is_sequence from pyspark import StorageLevel from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F from pyspark.sql.functions import pandas_udf from pyspark.sql.types import ( ArrayType, BooleanType, DataType, DoubleType, NumericType, Row, StringType, StructField, StructType, DecimalType, TimestampType, TimestampNTZType, ) from pyspark.sql.window import Window from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm. from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, Label, Name, Scalar, T from pyspark.pandas.accessors import PandasOnSparkFrameMethods from pyspark.pandas.config import option_context, get_option from pyspark.pandas.spark import functions as SF from pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods from pyspark.pandas.utils import ( align_diff_frames, column_labels_level, combine_frames, default_session, is_name_like_tuple, is_name_like_value, is_testing, name_like_string, same_anchor, scol_for, validate_arguments_and_invoke_function, validate_axis, validate_bool_kwarg, validate_how, validate_mode, verify_temp_column_name, log_advice, ) from pyspark.pandas.generic import Frame from pyspark.pandas.internal import ( InternalField, InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME, SPARK_DEFAULT_SERIES_NAME, SPARK_INDEX_NAME_PATTERN, ) from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame from pyspark.pandas.ml import corr from pyspark.pandas.typedef.typehints import ( as_spark_type, infer_return_type, pandas_on_spark_type, spark_type_to_pandas_dtype, DataFrameType, SeriesType, ScalarType, create_tuple_for_frame_type, ) from pyspark.pandas.plot import PandasOnSparkPlotAccessor if TYPE_CHECKING: from pyspark.sql._typing import OptionalPrimitiveType from pyspark.pandas.groupby import DataFrameGroupBy from pyspark.pandas.resample import DataFrameResampler from pyspark.pandas.indexes import Index from pyspark.pandas.series import Series # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$" ) _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). 
Equivalent to ``{equiv}``. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`. Parameters ---------- other : scalar Any single data Returns ------- DataFrame Result of the arithmetic operation. Examples -------- >>> df = ps.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle'], ... columns=['angles', 'degrees']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. Also reverse version. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(df) angles degrees circle 0 720 triangle 6 360 rectangle 8 720 >>> df + df + df angles degrees circle 0 1080 triangle 9 540 rectangle 12 1080 >>> df.radd(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide and true divide by constant with reverse version. >>> df / 10 angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 >>> df.truediv(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rtruediv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract by constant with reverse version. >>> df - 1 angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.sub(1) angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.rsub(1) angles degrees circle 1 -359 triangle -2 -179 rectangle -3 -359 Multiply by constant with reverse version. >>> df * 1 angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.mul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.rmul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Floor Divide by constant with reverse version. >>> df // 10 angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.floordiv(10) angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.rfloordiv(10) # doctest: +SKIP angles degrees circle inf 0.0 triangle 3.0 0.0 rectangle 2.0 0.0 Mod by constant with reverse version. >>> df % 2 angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.mod(2) angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.rmod(2) angles degrees circle NaN 2 triangle 2.0 2 rectangle 2.0 2 Power by constant with reverse version. >>> df ** 2 angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.pow(2) angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.rpow(2) angles degrees circle 1.0 2.348543e+108 triangle 8.0 1.532496e+54 rectangle 16.0 2.348543e+108 """ class DataFrame(Frame, Generic[T]): """ pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: InternalFrame Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \ or pandas-on-Spark Series Dict can contain Series, arrays, constants, or list-like objects Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a pandas-on-Spark Series, other arguments should not be used. 
index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ps.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from pandas DataFrame >>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ps.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__( # type: ignore[no-untyped-def] self, data=None, index=None, columns=None, dtype=None, copy=False ): if isinstance(data, InternalFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = data elif isinstance(data, SparkDataFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = InternalFrame(spark_frame=data, index_spark_columns=None) elif isinstance(data, ps.Series): assert index is None assert columns is None assert dtype is None assert not copy data = data.to_frame() internal = data._internal else: if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy pdf = data else: from pyspark.pandas.indexes.base import Index if isinstance(index, Index): raise TypeError( "The given index cannot be a pandas-on-Spark index. " "Try pandas index or array-like." ) pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) internal = InternalFrame.from_pandas(pdf) object.__setattr__(self, "_internal_frame", internal) @property def _pssers(self) -> Dict[Label, "Series"]: """Return a dict of column label -> Series which anchors `self`.""" from pyspark.pandas.series import Series if not hasattr(self, "_psseries"): object.__setattr__( self, "_psseries", {label: Series(data=self, index=label) for label in self._internal.column_labels}, ) else: psseries = cast(Dict[Label, Series], self._psseries) # type: ignore[has-type] assert len(self._internal.column_labels) == len(psseries), ( len(self._internal.column_labels), len(psseries), ) if any(self is not psser._psdf for psser in psseries.values()): # Refresh the dict to contain only Series anchoring `self`. self._psseries = { label: ( psseries[label] if self is psseries[label]._psdf else Series(data=self, index=label) ) for label in self._internal.column_labels } return self._psseries @property def _internal(self) -> InternalFrame: return cast(InternalFrame, self._internal_frame) # type: ignore[has-type] def _update_internal_frame( self, internal: InternalFrame, check_same_anchor: bool = True, anchor_force_disconnect: bool = False, ) -> None: """ Update InternalFrame with the given one. 
If the column_label is changed or the new InternalFrame is not the same `anchor` or the `anchor_force_disconnect` flag is set to True, disconnect the original anchor and create a new one. If `check_same_anchor` is `False`, checking whether or not the same anchor is ignored and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy, updating the underlying Spark DataFrame which need to combine a different Spark DataFrame. Parameters ---------- internal : InternalFrame The new InternalFrame check_same_anchor : bool Whether checking the same anchor anchor_force_disconnect : bool Force to disconnect the original anchor and create a new one """ from pyspark.pandas.series import Series if hasattr(self, "_psseries"): psseries = {} for old_label, new_label in zip_longest( self._internal.column_labels, internal.column_labels ): if old_label is not None: psser = self._pssers[old_label] renamed = old_label != new_label not_same_anchor = check_same_anchor and not same_anchor(internal, psser) if renamed or not_same_anchor or anchor_force_disconnect: psdf: DataFrame = DataFrame(self._internal.select_column(old_label)) psser._update_anchor(psdf) psser = None else: psser = None if new_label is not None: if psser is None: psser = Series(data=self, index=new_label) psseries[new_label] = psser self._psseries = psseries self._internal_frame = internal if hasattr(self, "_repr_pandas_cache"): del self._repr_pandas_cache @property def ndim(self) -> int: """ Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 None 7 8 >>> df.ndim 2 """ return 2 @property def axes(self) -> List: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def _reduce_for_stat_function( self, sfun: Callable[["Series"], Column], name: str, axis: Optional[Axis] = None, numeric_only: bool = True, skipna: bool = True, **kwargs: Any, ) -> "Series": """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis: used only for sanity check because series only support index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently. skipna : bool, default True Exclude NA/null values when computing the result. 
""" from pyspark.pandas.series import Series, first_series axis = validate_axis(axis) if axis == 0: min_count = kwargs.get("min_count", 0) exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] new_column_labels = [] for label in self._internal.column_labels: psser = self._psser_for(label) is_numeric_or_boolean = isinstance( psser.spark.data_type, (NumericType, BooleanType) ) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: if not skipna and get_option("compute.eager_check") and psser.hasnans: scol = F.first(F.lit(np.nan)) else: scol = sfun(psser) if min_count > 0: scol = F.when(Frame._count_expr(psser) >= min_count, scol) exprs.append(scol.alias(name_like_string(label))) new_column_labels.append(label) if len(exprs) == 1: return Series([]) sdf = self._internal.spark_frame.select(*exprs) # The data is expected to be small so it's fine to transpose/use default index. with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs) if len(pdf) <= limit: return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) # type: ignore[call-overload] def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return getattr(pd.concat(cols, axis=1), name)( axis=axis, numeric_only=numeric_only, **kwargs ) column_name = verify_temp_column_name( self._internal.spark_frame.select(self._internal.index_spark_columns), "__calculate_columns_axis__", ) sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)] ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=self._internal.index_names, index_fields=self._internal.index_fields, ) return first_series(DataFrame(internal)).rename(pser.name) def _psser_for(self, label: Label) -> "Series": """ Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ps.range(3) `self._psser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._psser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._psser_for(label)` must not be used directly with user inputs. 
In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64 """ return self._pssers[label] def _apply_series_op( self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False ) -> "DataFrame": applied = [] for label in self._internal.column_labels: applied.append(op(self._psser_for(label))) internal = self._internal.with_new_columns(applied) if should_resolve: internal = internal.resolved_copy return DataFrame(internal) # Arithmetic Operators def _map_series_op(self, op: str, other: Any) -> "DataFrame": from pyspark.pandas.base import IndexOpsMixin if not isinstance(other, DataFrame) and ( isinstance(other, IndexOpsMixin) or is_sequence(other) ): raise TypeError( "%s with a sequence is currently not supported; " "however, got %s." % (op, type(other).__name__) ) if isinstance(other, DataFrame): if self._internal.column_labels_level != other._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") if not same_anchor(self, other): # Different DataFrames def apply_op( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label], ) -> Iterator[Tuple["Series", Label]]: for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( getattr(psdf._psser_for(this_label), op)( psdf._psser_for(that_label) ).rename(this_label), this_label, ) return align_diff_frames(apply_op, self, other, fillna=True, how="full") else: applied = [] column_labels = [] for label in self._internal.column_labels: if label in other._internal.column_labels: applied.append(getattr(self._psser_for(label), op)(other._psser_for(label))) else: applied.append( SF.lit(None) .cast(self._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) for label in other._internal.column_labels: if label not in column_labels: applied.append( SF.lit(None) .cast(other._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) internal = self._internal.with_new_columns(applied, column_labels=column_labels) return DataFrame(internal) else: return self._apply_series_op(lambda psser: getattr(psser, op)(other)) def __add__(self, other: Any) -> "DataFrame": return self._map_series_op("add", other) def __radd__(self, other: Any) -> "DataFrame": return self._map_series_op("radd", other) def __truediv__(self, other: Any) -> "DataFrame": return self._map_series_op("truediv", other) def __rtruediv__(self, other: Any) -> "DataFrame": return self._map_series_op("rtruediv", other) def __mul__(self, other: Any) -> "DataFrame": return self._map_series_op("mul", other) def __rmul__(self, other: Any) -> "DataFrame": return self._map_series_op("rmul", other) def __sub__(self, other: Any) -> "DataFrame": return self._map_series_op("sub", other) def __rsub__(self, other: Any) -> "DataFrame": return self._map_series_op("rsub", other) def __pow__(self, other: Any) -> "DataFrame": return self._map_series_op("pow", other) def __rpow__(self, other: Any) -> "DataFrame": return self._map_series_op("rpow", other) def __mod__(self, other: Any) -> "DataFrame": return self._map_series_op("mod", other) def __rmod__(self, other: Any) -> "DataFrame": return self._map_series_op("rmod", other) def __floordiv__(self, other: Any) -> "DataFrame": return self._map_series_op("floordiv", other) def __rfloordiv__(self, other: Any) -> "DataFrame": return self._map_series_op("rfloordiv", other) def __abs__(self) -> "DataFrame": return 
self._apply_series_op(lambda psser: abs(psser)) def __neg__(self) -> "DataFrame": return self._apply_series_op(lambda psser: -psser) def add(self, other: Any) -> "DataFrame": return self + other # create accessor for plot plot = CachedAccessor("plot", PandasOnSparkPlotAccessor) # create accessor for Spark related methods. spark = CachedAccessor("spark", SparkFrameMethods) # create accessor for pandas-on-Spark specific methods. pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkFrameMethods) # keep the name "koalas" for backward compatibility. koalas = CachedAccessor("koalas", PandasOnSparkFrameMethods) @no_type_check def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__ @no_type_check def boxplot(self, **kwds): return self.plot.box(**kwds) boxplot.__doc__ = PandasOnSparkPlotAccessor.box.__doc__ @no_type_check def kde(self, bw_method=None, ind=None, **kwds): return self.plot.kde(bw_method, ind, **kwds) kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__ add.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd" ) def radd(self, other: Any) -> "DataFrame": return other + self radd.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="other + dataframe", reverse="add" ) def div(self, other: Any) -> "DataFrame": return self / other div.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv" ) divide = div def rdiv(self, other: Any) -> "DataFrame": return other / self rdiv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div" ) def truediv(self, other: Any) -> "DataFrame": return self / other truediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv" ) def rtruediv(self, other: Any) -> "DataFrame": return other / self rtruediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv" ) def mul(self, other: Any) -> "DataFrame": return self * other mul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul" ) multiply = mul def rmul(self, other: Any) -> "DataFrame": return other * self rmul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul" ) def sub(self, other: Any) -> "DataFrame": return self - other sub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub" ) subtract = sub def rsub(self, other: Any) -> "DataFrame": return other - self rsub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub" ) def mod(self, other: Any) -> "DataFrame": return self % other mod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod" ) def rmod(self, other: Any) -> "DataFrame": return other % self rmod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod" ) def pow(self, other: Any) -> "DataFrame": return self**other pow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow" ) def rpow(self, other: Any) -> "DataFrame": return other**self rpow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power", op_name="**", equiv="other ** 
dataframe", reverse="pow" ) def floordiv(self, other: Any) -> "DataFrame": return self // other floordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv" ) def rfloordiv(self, other: Any) -> "DataFrame": return other // self rfloordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv" ) # Comparison Operators def __eq__(self, other: Any) -> "DataFrame": # type: ignore[override] return self._map_series_op("eq", other) def __ne__(self, other: Any) -> "DataFrame": # type: ignore[override] return self._map_series_op("ne", other) def __lt__(self, other: Any) -> "DataFrame": return self._map_series_op("lt", other) def __le__(self, other: Any) -> "DataFrame": return self._map_series_op("le", other) def __ge__(self, other: Any) -> "DataFrame": return self._map_series_op("ge", other) def __gt__(self, other: Any) -> "DataFrame": return self._map_series_op("gt", other) def eq(self, other: Any) -> "DataFrame": """ Compare if the current value is equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False """ return self == other equals = eq def gt(self, other: Any) -> "DataFrame": """ Compare if the current value is greater than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False """ return self > other def ge(self, other: Any) -> "DataFrame": """ Compare if the current value is greater than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False """ return self >= other def lt(self, other: Any) -> "DataFrame": """ Compare if the current value is less than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False """ return self < other def le(self, other: Any) -> "DataFrame": """ Compare if the current value is less than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False """ return self <= other def ne(self, other: Any) -> "DataFrame": """ Compare if the current value is not equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True """ return self != other def applymap(self, func: Callable[[Any], Any]) -> "DataFrame": """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. 
Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # TODO: We can implement shortcut theoretically since it creates new DataFrame # anyway and we don't have to worry about operations on different DataFrames. return self._apply_series_op(lambda psser: psser.apply(func)) # TODO: not all arguments are implemented comparing to pandas' for now. def aggregate(self, func: Union[List[str], Dict[Name, List[str]]]) -> "DataFrame": """Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index() A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index() A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN For multi-index columns: >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) >>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index() X Y A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 >>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']}) >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE X A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN """ from pyspark.pandas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError( "If the given function is a list, it " "should only contains function names as strings." ) if not isinstance(func, dict) or not all( is_name_like_value(key) and ( isinstance(value, str) or (isinstance(value, list) and all(isinstance(v, str) for v in value)) ) for key, value in func.items() ): raise ValueError( "aggs must be a dict mapping from column name to aggregate " "functions (string or list of strings)." ) with option_context("compute.default_index_type", "distributed"): psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func)) # The codes below basically converts: # # A B # sum min min max # 0 12.0 1.0 2.0 8.0 # # to: # A B # max NaN 8.0 # min 1.0 2.0 # sum 12.0 NaN # # Aggregated output is usually pretty much small. 
return psdf.stack().droplevel(0)[list(func.keys())] agg = aggregate def corr(self, method: str = "pearson") -> "DataFrame": """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : DataFrame See Also -------- Series.corr Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between pandas-on-Spark and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. pandas-on-Spark will return an error. * pandas-on-Spark doesn't support the following argument(s). * `min_periods` argument is not supported """ return cast(DataFrame, ps.from_pandas(corr(self, method))) # TODO: add axis parameter and support more methods def corrwith( self, other: DataFrameOrSeries, drop: bool = False, method: str = "pearson" ) -> "Series": """ Compute pairwise correlation. Pairwise correlation is computed between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. .. versionadded:: 3.4.0 Parameters ---------- other : DataFrame, Series Object with which to compute correlations. drop : bool, default False Drop missing indices from result. method : str, default 'pearson' Method of correlation, one of: * pearson : standard correlation coefficient Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr : Compute pairwise correlation of columns. Examples -------- >>> df1 = ps.DataFrame({ ... "A":[1, 5, 7, 8], ... "X":[5, 8, 4, 3], ... "C":[10, 4, 9, 3]}) >>> df1.corrwith(df1[["X", "C"]]) X 1.0 C 1.0 A NaN dtype: float64 >>> df2 = ps.DataFrame({ ... "A":[5, 3, 6, 4], ... "B":[11, 2, 4, 3], ... "C":[4, 3, 8, 5]}) >>> with ps.option_context("compute.ops_on_diff_frames", True): ... df1.corrwith(df2) A -0.041703 C 0.395437 X NaN B NaN dtype: float64 >>> with ps.option_context("compute.ops_on_diff_frames", True): ... 
df2.corrwith(df1.X) A -0.597614 B -0.151186 C -0.642857 dtype: float64 """ from pyspark.pandas.series import Series, first_series if (method is not None) and (method not in ["pearson"]): raise NotImplementedError("corrwith currently works only for method='pearson'") if not isinstance(other, (DataFrame, Series)): raise TypeError("unsupported type: {}".format(type(other).__name__)) right_is_series = isinstance(other, Series) if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other, how="inner") this = combined["this"] that = combined["that"] this_numeric_column_labels: List[Label] = [] for column_label in this._internal.column_labels: if isinstance(this._internal.spark_type_for(column_label), (NumericType, BooleanType)): this_numeric_column_labels.append(column_label) that_numeric_column_labels: List[Label] = [] for column_label in that._internal.column_labels: if isinstance(that._internal.spark_type_for(column_label), (NumericType, BooleanType)): that_numeric_column_labels.append(column_label) intersect_numeric_column_labels: List[Label] = [] diff_numeric_column_labels: List[Label] = [] corr_scols = [] if right_is_series: intersect_numeric_column_labels = this_numeric_column_labels that_scol = that._internal.spark_column_for(that_numeric_column_labels[0]) for numeric_column_label in intersect_numeric_column_labels: this_scol = this._internal.spark_column_for(numeric_column_label) corr_scols.append( F.corr(this_scol.cast("double"), that_scol.cast("double")).alias( name_like_string(numeric_column_label) ) ) else: for numeric_column_label in this_numeric_column_labels: if numeric_column_label in that_numeric_column_labels: intersect_numeric_column_labels.append(numeric_column_label) else: diff_numeric_column_labels.append(numeric_column_label) for numeric_column_label in that_numeric_column_labels: if numeric_column_label not in this_numeric_column_labels: diff_numeric_column_labels.append(numeric_column_label) for numeric_column_label in intersect_numeric_column_labels: this_scol = this._internal.spark_column_for(numeric_column_label) that_scol = that._internal.spark_column_for(numeric_column_label) corr_scols.append( F.corr(this_scol.cast("double"), that_scol.cast("double")).alias( name_like_string(numeric_column_label) ) ) corr_labels: List[Label] = intersect_numeric_column_labels if not drop: for numeric_column_label in diff_numeric_column_labels: corr_scols.append( SF.lit(None).cast("double").alias(name_like_string(numeric_column_label)) ) corr_labels.append(numeric_column_label) sdf = combined._internal.spark_frame.select( *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *corr_scols ).limit( 1 ) # limit(1) to avoid returning more than 1 row when intersection is empty # The data is expected to be small so it's fine to transpose/use default index. with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=corr_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) def iteritems(self) -> Iterator[Tuple[Name, "Series"]]: """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. 
Examples -------- >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ return ( (label if len(label) > 1 else label[0], self._psser_for(label)) for label in self._internal.column_labels ) def iterrows(self) -> Iterator[Tuple[Name, pd.Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]: k = ( row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(row[c] for c in internal_index_columns) ) v = [row[c] for c in internal_data_columns] return k, v for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator() ): s = pd.Series(v, index=columns, name=k) yield k, s def itertuples( self, index: bool = True, name: Optional[str] = "PandasOnSpark" ) -> Iterator[Tuple]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "PandasOnSpark" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). Examples -------- >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... 
PandasOnSpark(Index='dog', num_legs=4, num_wings=0) PandasOnSpark(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... PandasOnSpark(num_legs=4, num_wings=0) PandasOnSpark(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ fields = list(self.columns) if index: fields.insert(0, "Index") index_spark_column_names = self._internal.index_spark_column_names data_spark_column_names = self._internal.data_spark_column_names def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]: k = ( row[index_spark_column_names[0]] if len(index_spark_column_names) == 1 else tuple(row[c] for c in index_spark_column_names) ) v = [row[c] for c in data_spark_column_names] return k, v can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255 if name is not None and can_return_named_tuples: itertuple = namedtuple(name, fields, rename=True) # type: ignore[misc] for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield itertuple._make(([k] if index else []) + list(v)) else: for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield tuple(([k] if index else []) + list(v)) def items(self) -> Iterator[Tuple[Name, "Series"]]: """This is an alias of ``iteritems``.""" return self.iteritems() def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None: """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... 
# 6, 7 """ args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args ) def to_html( self, buf: Optional[IO[str]] = None, columns: Optional[Sequence[Name]] = None, col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, justify: Optional[str] = None, max_rows: Optional[int] = None, max_cols: Optional[int] = None, show_dimensions: bool = False, decimal: str = ".", bold_rows: bool = True, classes: Optional[Union[str, list, tuple]] = None, escape: bool = True, notebook: bool = False, border: Optional[int] = None, table_id: Optional[str] = None, render_links: bool = False, ) -> Optional[str]: """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args ) def to_string( self, buf: Optional[IO[str]] = None, columns: Optional[Sequence[Name]] = None, col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, justify: Optional[str] = None, max_rows: Optional[int] = None, max_cols: Optional[int] = None, show_dimensions: bool = False, decimal: str = ".", line_width: Optional[int] = None, ) -> Optional[str]: """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. 
See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: psdf = self.head(max_rows) else: psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args ) def to_dict(self, orient: str = "dict", into: Type = dict) -> Union[List, Mapping]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. 
>>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args ) def to_latex( self, buf: Optional[IO[str]] = None, columns: Optional[List[Name]] = None, col_space: Optional[int] = None, header: bool = True, index: bool = True, na_rep: str = "NaN", formatters: Optional[ Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]] ] = None, float_format: Optional[Callable[[float], str]] = None, sparsify: Optional[bool] = None, index_names: bool = True, bold_rows: bool = False, column_format: Optional[str] = None, longtable: Optional[bool] = None, escape: Optional[bool] = None, encoding: Optional[str] = None, decimal: str = ".", multicolumn: Optional[bool] = None, multicolumn_format: Optional[str] = None, multirow: Optional[bool] = None, ) -> Optional[str]: r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. 
        encoding : str, optional
            A string representing the encoding to use in the output file,
            defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3.
        decimal : str, default ‘.’
            Character recognized as decimal separator, e.g. ‘,’ in Europe.
        multicolumn : bool, default True
            Use multicolumn to enhance MultiIndex columns. The default will be read from the config
            module.
        multicolumn_format : str, default ‘l’
            The alignment for multicolumns, similar to column_format. The default will be read from
            the config module.
        multirow : bool, default False
            Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
            LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
            rows, separating groups via clines. The default will be read from the pandas config
            module.

        Returns
        -------
        str or None
            If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.

        See Also
        --------
        DataFrame.to_string : Render a DataFrame to a console-friendly
            tabular output.
        DataFrame.to_html : Render a DataFrame as an HTML table.

        Examples
        --------
        >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],
        ...                    'mask': ['red', 'purple'],
        ...                    'weapon': ['sai', 'bo staff']},
        ...                   columns=['name', 'mask', 'weapon'])
        >>> print(df.to_latex(index=False))  # doctest: +NORMALIZE_WHITESPACE
        \begin{tabular}{lll}
        \toprule
              name &    mask &    weapon \\
        \midrule
           Raphael &     red &       sai \\
         Donatello &  purple &  bo staff \\
        \bottomrule
        \end{tabular}
        """
        args = locals()
        psdf = self
        return validate_arguments_and_invoke_function(
            psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args
        )

    # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
    #  when creating arrays)
    def transpose(self) -> "DataFrame":
        """
        Transpose index and columns.

        Reflect the DataFrame over its main diagonal by writing rows as columns
        and vice-versa. The property :attr:`.T` is an accessor to the method
        :meth:`transpose`.

        .. note:: This method is based on an expensive operation due to the nature
            of big data. Internally it needs to generate each row for each value, and
            then group twice - it is a huge operation. To prevent misusage, this method
            has the 'compute.max_rows' default limit of input length, and raises a ValueError.

                >>> from pyspark.pandas.config import option_context
                >>> with option_context('compute.max_rows', 1000):  # doctest: +NORMALIZE_WHITESPACE
                ...     ps.DataFrame({'a': range(1001)}).transpose()
                Traceback (most recent call last):
                  ...
                ValueError: Current DataFrame has more than the given limit 1000 rows.
                Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
                to retrieve more than 1000 rows. Note that, before changing the
                'compute.max_rows', this operation is considerably expensive.

        Returns
        -------
        DataFrame
            The transposed DataFrame.

        Notes
        -----
        Transposing a DataFrame with mixed dtypes will result in a homogeneous
        DataFrame with the coerced dtype. For instance, if int and float have
        to be placed in same column, it becomes float. If type coercion is not
        possible, it fails.

        Also, note that the values in index should be unique because they become
        unique column names.

        In addition, if Spark 2.3 is used, the types should always be exactly the same.

        Examples
        --------
        **Square DataFrame with homogeneous dtype**

        >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
        >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])
        >>> df1
           col1  col2
        0     1     3
        1     2     4

        >>> df1_transposed = df1.T.sort_index()  # doctest: +SKIP
        >>> df1_transposed  # doctest: +SKIP
              0  1
        col1  1  2
        col2  3  4

        When the dtype is homogeneous in the original DataFrame, we get a
        transposed DataFrame with the same dtype:

        >>> df1.dtypes
        col1    int64
        col2    int64
        dtype: object
        >>> df1_transposed.dtypes  # doctest: +SKIP
        0    int64
        1    int64
        dtype: object

        **Non-square DataFrame with mixed dtypes**

        >>> d2 = {'score': [9.5, 8],
        ...       'kids': [0, 0],
        ...       'age': [12, 22]}
        >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])
        >>> df2
           score  kids  age
        0    9.5     0   12
        1    8.0     0   22

        >>> df2_transposed = df2.T.sort_index()  # doctest: +SKIP
        >>> df2_transposed  # doctest: +SKIP
                  0     1
        age    12.0  22.0
        kids    0.0   0.0
        score   9.5   8.0

        When the DataFrame has mixed dtypes, we get a transposed DataFrame with
        the coerced dtype:

        >>> df2.dtypes
        score    float64
        kids       int64
        age        int64
        dtype: object
        >>> df2_transposed.dtypes  # doctest: +SKIP
        0    float64
        1    float64
        dtype: object
        """
        max_compute_count = get_option("compute.max_rows")
        if max_compute_count is not None:
            pdf = self.head(max_compute_count + 1)._to_internal_pandas()
            if len(pdf) > max_compute_count:
                raise ValueError(
                    "Current DataFrame has more than the given limit {0} rows. "
                    "Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
                    "to retrieve more than {0} rows. Note that, before changing the "
                    "'compute.max_rows', this operation is considerably expensive.".format(
                        max_compute_count
                    )
                )
            return DataFrame(pdf.transpose())

        # Explode the data to be pairs.
        #
        # For instance, if the current input DataFrame is as below:
        #
        # +------+------+------+------+------+
        # |index1|index2|(a,x1)|(a,x2)|(b,x3)|
        # +------+------+------+------+------+
        # |    y1|    z1|     1|     0|     0|
        # |    y2|    z2|     0|    50|     0|
        # |    y3|    z3|     3|     2|     1|
        # +------+------+------+------+------+
        #
        # Output of `exploded_df` becomes as below:
        #
        # +-----------------+-----------------+-----------------+-----+
        # |            index|__index_level_0__|__index_level_1__|value|
        # +-----------------+-----------------+-----------------+-----+
        # |{"a":["y1","z1"]}|                a|               x1|    1|
        # |{"a":["y1","z1"]}|                a|               x2|    0|
        # |{"a":["y1","z1"]}|                b|               x3|    0|
        # |{"a":["y2","z2"]}|                a|               x1|    0|
        # |{"a":["y2","z2"]}|                a|               x2|   50|
        # |{"a":["y2","z2"]}|                b|               x3|    0|
        # |{"a":["y3","z3"]}|                a|               x1|    3|
        # |{"a":["y3","z3"]}|                a|               x2|    2|
        # |{"a":["y3","z3"]}|                b|               x3|    1|
        # +-----------------+-----------------+-----------------+-----+
        pairs = F.explode(
            F.array(
                *[
                    F.struct(
                        *[
                            SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))
                            for i, col in enumerate(label)
                        ],
                        *[self._internal.spark_column_for(label).alias("value")],
                    )
                    for label in self._internal.column_labels
                ]
            )
        )

        exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select(
            [
                F.to_json(
                    F.struct(
                        F.array(*[scol for scol in self._internal.index_spark_columns]).alias("a")
                    )
                ).alias("index"),
                F.col("pairs.*"),
            ]
        )

        # After that, executes pivot with key and its index column.
        # Note that index column should contain unique values since column names
        # should be unique.
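        #
        # For the example above, the pivoted frame then looks roughly like below (rows and
        # pivoted columns may come out in a different order):
        #
        # +-----------------+-----------------+-----------------+-----------------+--...
        # |__index_level_0__|__index_level_1__|{"a":["y1","z1"]}|{"a":["y2","z2"]}|{"...
        # +-----------------+-----------------+-----------------+-----------------+--...
        # |                a|               x1|                1|                0|  ...
        # |                a|               x2|                0|               50|  ...
        # |                b|               x3|                0|                0|  ...
        # +-----------------+-----------------+-----------------+-----------------+--...
        #
        # The group-by columns (the original column labels) become the new row index, and each
        # pivoted column name is decoded from its JSON form back into the original index values,
        # which become the new column labels.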
internal_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index") transposed_df = pivoted_df.agg(F.first(F.col("value"))) new_data_columns = list( filter(lambda x: x not in internal_index_columns, transposed_df.columns) ) column_labels = [ None if len(label) == 1 and label[0] is None else label for label in (tuple(json.loads(col)["a"]) for col in new_data_columns) ] internal = InternalFrame( spark_frame=transposed_df, index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns], index_names=self._internal.column_label_names, column_labels=column_labels, data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=self._internal.index_names, ) return DataFrame(internal) T = property(transpose) def apply( self, func: Callable, axis: Axis = 0, args: Sequence[Any] = (), **kwds: Any ) -> Union["Series", "DataFrame", "Index"]: """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. pandas-on-Spark internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ps.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ps.Series[np.int32]: ... return s ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. To specify the column names, you can assign them in a pandas friendly style as below: >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]: ... return x + 1 >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}) >>> def plus_one(x) -> ps.DataFrame[ ... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]: ... return x + 1 Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. 
Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ps.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let pandas-on-Spark infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 dtype: int64 Likewise, you can omit the type hint and let pandas-on-Spark infer its type. >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 >>> df.apply(max, axis=1) 0 9 1 9 2 9 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. In this case, the column names are automatically generated. >>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]: ... return x ... >>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE A B index 0 4 9 1 4 9 2 4 9 You can also specify extra arguments. >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]: ... return a + b + c ... >>> df.apply(plus_two, axis=1, args=(1,), c=3) c0 c1 0 8 13 1 8 13 2 8 13 """ from pyspark.pandas.groupby import GroupBy from pyspark.pandas.series import first_series if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func # Note that the return type hint specified here affects actual return # type in Spark (e.g., infer_return_type). And, MyPy does not allow # redefinition of a function. func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731 axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None should_retain_index = should_infer_schema def apply_func(pdf: pd.DataFrame) -> pd.DataFrame: pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type] if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied: DataFrame = DataFrame(self._internal.resolved_copy) column_labels: Optional[List[Label]] = None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. log_advice( "If the type hints is not specified for `apply`, " "it is expensive to infer the data type internally." 
) limit = get_option("compute.shortcut_limit") pdf = self_applied.head(limit + 1)._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type] psser_or_psdf = ps.from_pandas(applied) if len(pdf) <= limit: return psser_or_psdf psdf = psser_or_psdf if isinstance(psser_or_psdf, ps.Series): should_return_series = True psdf = psser_or_psdf._psdf index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields] data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields] return_schema = StructType([field.struct_field for field in index_fields + data_fields]) output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=should_retain_index ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) # If schema is inferred, we can restore indexes too. internal = psdf._internal.with_new_sdf( spark_frame=sdf, index_fields=index_fields, data_fields=data_fields ) else: return_type = infer_return_type(func) require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) index_fields = None if require_index_axis: if axis != 0: raise TypeError( "The given function should specify a scalar or a series as its type " "hints when axis is 0 or 'index'; however, the return type " "was %s" % return_sig ) dtype = cast(SeriesType, return_type).dtype spark_type = cast(SeriesType, return_type).spark_type data_fields = [ InternalField( dtype=dtype, struct_field=StructField(name=name, dataType=spark_type) ) for name in self_applied.columns ] return_schema = StructType([field.struct_field for field in data_fields]) elif require_column_axis: if axis != 1: raise TypeError( "The given function should specify a scalar or a frame as its type " "hints when axis is 1 or 'column'; however, the return type " "was %s" % return_sig ) index_fields = cast(DataFrameType, return_type).index_fields should_retain_index = len(index_fields) > 0 data_fields = cast(DataFrameType, return_type).data_fields return_schema = cast(DataFrameType, return_type).spark_type else: # any axis is fine. 
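                # The return type hint is a plain scalar, so the pandas UDF emits a single
                # unnamed column: store it under the default series column name with a ``None``
                # column label so the result can be unwrapped into a Series at the end.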
should_return_series = True spark_type = cast(ScalarType, return_type).spark_type dtype = cast(ScalarType, return_type).dtype data_fields = [ InternalField( dtype=dtype, struct_field=StructField( name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type ), ) ] return_schema = StructType([field.struct_field for field in data_fields]) column_labels = [None] output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=should_retain_index ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) index_spark_columns = None index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None if should_retain_index: index_spark_columns = [ scol_for(sdf, index_field.struct_field.name) for index_field in index_fields ] if not any( [ SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name) for index_field in index_fields ] ): index_names = [(index_field.struct_field.name,) for index_field in index_fields] internal = InternalFrame( spark_frame=sdf, index_names=index_names, index_spark_columns=index_spark_columns, index_fields=index_fields, data_fields=data_fields, column_labels=column_labels, ) result: DataFrame = DataFrame(internal) if should_return_series: return first_series(result) else: return result def transform( self, func: Callable[..., "Series"], axis: Axis = 0, *args: Any, **kwargs: Any ) -> "DataFrame": """ Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 pandas-on-Spark uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole pandas-on-Spark series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. See below: >>> def func(x) -> ps.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ps.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let pandas-on-Spark infer its type. 
>>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 2 2 2 3 You can also specify extra arguments. >>> def calculation(x, y, z) -> ps.Series[int]: ... return x ** y + z >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE X A B 0 20 21 1 21 1044 2 1044 59069 """ if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func # Note that the return type hint specified here affects actual return # type in Spark (e.g., infer_return_type). And, MyPy does not allow # redefinition of a function. func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731 axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. log_advice( "If the type hints is not specified for `transform`, " "it is expensive to infer the data type internally." ) limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) # type: ignore[arg-type] psdf: DataFrame = DataFrame(transformed) if len(pdf) <= limit: return psdf applied = [] data_fields = [] for input_label, output_label in zip( self._internal.column_labels, psdf._internal.column_labels ): psser = self._psser_for(input_label) field = psdf._internal.field_for(output_label).normalize_spark_type() data_fields.append(field) return_schema = field.spark_type applied.append( psser.pandas_on_spark._transform_batch( func=lambda c: func(c, *args, **kwargs), return_type=SeriesType(field.dtype, return_schema), ) ) internal = self._internal.with_new_columns(applied, data_fields=data_fields) return DataFrame(internal) else: return self._apply_series_op( lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs) ) def pop(self, item: Name) -> "DataFrame": """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... 
columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN """ result = self[item] self._update_internal_frame(self.drop(columns=item)._internal) return result # TODO: add axis parameter can work when '1' or 'columns' def xs(self, key: Name, axis: Axis = 0, level: Optional[int] = None) -> DataFrameOrSeries: """ Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- DataFrame or Series Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ps.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0 """ from pyspark.pandas.series import first_series if not is_name_like_value(key): raise TypeError("'key' should be a scalar value or tuple that contains scalar values") if level is not None and is_name_like_tuple(key): raise KeyError(key) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not is_name_like_tuple(key): key = (key,) if len(key) > self._internal.index_level: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), self._internal.index_level ) ) if level is None: level = 0 rows = [ self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level) ] internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows)) if len(key) == self._internal.index_level: psdf: DataFrame = DataFrame(internal) pdf = psdf.head(2)._to_internal_pandas() if len(pdf) == 0: raise KeyError(key) elif len(pdf) > 1: return psdf else: return 
first_series(DataFrame(pdf.transpose())) else: index_spark_columns = ( internal.index_spark_columns[:level] + internal.index_spark_columns[level + len(key) :] ) index_names = internal.index_names[:level] + internal.index_names[level + len(key) :] index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :] internal = internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, ).resolved_copy return DataFrame(internal) def between_time( self, start_time: Union[datetime.time, str], end_time: Union[datetime.time, str], include_start: bool = True, include_end: bool = True, axis: Axis = 0, ) -> "DataFrame": """ Select values between particular times of the day (example: 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. include_start : bool, default True Whether the start time needs to be included in the result. include_end : bool, default True Whether the end time needs to be included in the result. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. Returns ------- DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> psdf.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> psdf.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("between_time currently only works for axis=0") if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("Index must be DatetimeIndex") psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, "__index_name__") return_types = [psdf.index.dtype] + list(psdf.dtypes) def pandas_between_time( # type: ignore[no-untyped-def] pdf, ) -> ps.DataFrame[return_types]: # type: ignore[valid-type] return pdf.between_time(start_time, end_time, include_start, include_end).reset_index() # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a # default index, which will never be used. So use "distributed" index as a dummy to # avoid overhead. 
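        # ``pandas_between_time`` above resets the original datetime index into the first data
        # column, so the internal frame rebuilt below promotes that first column back to the
        # index and keeps the remaining columns as data.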
with option_context("compute.default_index_type", "distributed"): psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time) return DataFrame( self._internal.copy( spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:], ) ) # TODO: implement axis=1 def at_time( self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0 ) -> "DataFrame": """ Select values at particular time of day (example: 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx) >>> psdf A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> psdf.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if asof: raise NotImplementedError("'asof' argument is not supported") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("at_time currently only works for axis=0") if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("Index must be DatetimeIndex") psdf = self.copy() psdf.index.name = verify_temp_column_name(psdf, "__index_name__") return_types = [psdf.index.dtype] + list(psdf.dtypes) def pandas_at_time( # type: ignore[no-untyped-def] pdf, ) -> ps.DataFrame[return_types]: # type: ignore[valid-type] return pdf.at_time(time, asof, axis).reset_index() # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach # a default index, which will never be used. So use "distributed" index as a dummy # to avoid overhead. with option_context("compute.default_index_type", "distributed"): psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time) return DataFrame( self._internal.copy( spark_frame=psdf._internal.spark_frame, index_spark_columns=psdf._internal.data_spark_columns[:1], index_fields=psdf._internal.data_fields[:1], data_spark_columns=psdf._internal.data_spark_columns[1:], data_fields=psdf._internal.data_fields[1:], ) ) def where( self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan, axis: Axis = None, ) -> "DataFrame": """ Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. axis : int, default None Can only be set to 0 at the moment for compatibility with pandas. 
Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values are False >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just check boolean regardless of column name >>> cond = ps.Series([1, 2]) > 1 >>> cond 0 False 1 True dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') tmp_cond_col_name = "__tmp_cond_col_{}__".format tmp_other_col_name = "__tmp_other_col_{}__".format psdf = self.copy() tmp_cond_col_names = [ tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(cond, DataFrame): cond = cond[ [ ( cond._internal.spark_column_for(label) if label in cond._internal.column_labels else SF.lit(False) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_cond_col_names) ] ] psdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[ [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names] ] psdf[tmp_cond_col_names] = cond else: raise TypeError("type of cond must be a DataFrame or Series") tmp_other_col_names = [ tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(other, DataFrame): other = other[ [ ( other._internal.spark_column_for(label) if label in other._internal.column_labels else SF.lit(np.nan) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_other_col_names) ] ] psdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = other[ [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names] ] psdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: psdf[tmp_other_col_name(name_like_string(label))] = other # above logic make spark dataframe looks like below: # +-----------------+---+---+------------------+-------------------+------------------+--... # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__... # +-----------------+---+---+------------------+-------------------+------------------+--... # | 0| 0|100| true| 0| false| ... # | 1| 1|200| false| -1| false| ... # | 3| 3|400| true| -3| false| ... # | 2| 2|300| false| -2| true| ... # | 4| 4|500| false| -4| false| ... 
# +-----------------+---+---+------------------+-------------------+------------------+--... data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append( F.when( psdf[tmp_cond_col_name(name_like_string(label))].spark.column, psdf._internal.spark_column_for(label), ) .otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column) .alias(psdf._internal.spark_column_name_for(label)) ) return DataFrame( psdf._internal.with_new_columns( data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes? ) ) def mask( self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan ) -> "DataFrame": """ Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series if not isinstance(cond, (DataFrame, Series)): raise TypeError("type of cond must be a DataFrame or Series") cond_inversed = cond._apply_series_op(lambda psser: ~psser) return self.where(cond_inversed, other) @property def index(self) -> "Index": """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from pyspark.pandas.indexes.base import Index return Index._new_instance(self) @property def empty(self) -> bool: """ Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ps.range(10).empty False >>> ps.range(0).empty True >>> ps.DataFrame({}, index=list('abc')).empty True """ return ( len(self._internal.column_labels) == 0 or self._internal.resolved_copy.spark_frame.rdd.isEmpty() ) @property def style(self) -> "Styler": """ Property returning a Styler object containing methods for building a styled HTML representation for the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ps.range(1001).style # doctest: +SKIP <pandas.io.formats.style.Styler object at ...> """ max_results = get_option("compute.max_rows") pdf = self.head(max_results + 1)._to_internal_pandas() if len(pdf) > max_results: warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning) return pdf.head(max_results).style def set_index( self, keys: Union[Name, List[Name]], drop: bool = True, append: bool = False, inplace: bool = False, ) -> Optional["DataFrame"]: """Set the DataFrame index (row labels) using one or more existing columns. 
Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ps.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ inplace = validate_bool_kwarg(inplace, "inplace") key_list: List[Label] if is_name_like_tuple(keys): key_list = [cast(Label, keys)] elif is_name_like_value(keys): key_list = [(keys,)] else: key_list = [key if is_name_like_tuple(key) else (key,) for key in keys] columns = set(self._internal.column_labels) for key in key_list: if key not in columns: raise KeyError(name_like_string(key)) if drop: column_labels = [ label for label in self._internal.column_labels if label not in key_list ] else: column_labels = self._internal.column_labels if append: index_spark_columns = self._internal.index_spark_columns + [ self._internal.spark_column_for(label) for label in key_list ] index_names = self._internal.index_names + key_list index_fields = self._internal.index_fields + [ self._internal.field_for(label) for label in key_list ] else: index_spark_columns = [self._internal.spark_column_for(label) for label in key_list] index_names = key_list index_fields = [self._internal.field_for(label) for label in key_list] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], data_fields=[self._internal.field_for(label) for label in column_labels], ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def reset_index( self, level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None, drop: bool = False, inplace: bool = False, col_level: int = 0, col_fill: str = "", ) -> Optional["DataFrame"]: """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. 
Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ps.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ps.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") multi_index = self._internal.index_level > 1 def rename(index: int) -> Label: if multi_index: return ("level_{}".format(index),) else: if ("index",) not in self._internal.column_labels: return ("index",) else: return ("level_{}".format(index),) if level is None: new_column_labels = [ name if name is not None else rename(i) for i, name in enumerate(self._internal.index_names) ] new_data_spark_columns = [ scol.alias(name_like_string(label)) for scol, label in zip(self._internal.index_spark_columns, new_column_labels) ] new_data_fields = self._internal.index_fields index_spark_columns = [] index_names = [] index_fields = [] else: if is_list_like(level): level = list(cast(Sequence[Union[int, Name]], level)) if isinstance(level, int) or is_name_like_tuple(level): level_list = [cast(Union[int, Label], level)] elif is_name_like_value(level): level_list = [(level,)] else: level_list = [ lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,) for lvl in level ] if all(isinstance(lvl, int) for lvl in level_list): int_level_list = cast(List[int], level_list) for lev in int_level_list: if lev >= self._internal.index_level: raise IndexError( "Too many levels: Index has only {} level, not {}".format( self._internal.index_level, lev + 1 ) ) idx = int_level_list elif all(is_name_like_tuple(lev) for lev in level_list): idx = [] for label in cast(List[Label], level_list): try: i = self._internal.index_names.index(label) idx.append(i) except ValueError: if multi_index: raise KeyError("Level unknown not found") else: raise KeyError( "Level unknown must be same as name ({})".format( name_like_string(self._internal.index_names[0]) ) ) else: raise ValueError("Level should be all int or all string.") idx.sort() new_column_labels = [] new_data_spark_columns = [] new_data_fields = [] index_spark_columns = self._internal.index_spark_columns.copy() index_names = self._internal.index_names.copy() index_fields = self._internal.index_fields.copy() for i in idx[::-1]: name = index_names.pop(i) new_column_labels.insert(0, name if name is not None else rename(i)) scol = index_spark_columns.pop(i) new_data_spark_columns.insert(0, scol.alias(name_like_string(name))) new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name))) if drop: new_data_spark_columns = [] new_column_labels = [] new_data_fields = [] for label in new_column_labels: if label in self._internal.column_labels: raise ValueError("cannot insert {}, already exists".format(name_like_string(label))) if self._internal.column_labels_level > 1: column_depth = len(self._internal.column_labels[0]) if col_level >= column_depth: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( column_depth, col_level + 1 ) ) if any(col_level + len(label) > column_depth for label in new_column_labels): raise ValueError("Item must have length equal to number of levels.") new_column_labels = [ tuple( ([col_fill] * col_level) + list(label) + ([col_fill] * (column_depth - (len(label) + col_level))) ) for label in new_column_labels ] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_fields=index_fields, column_labels=new_column_labels + self._internal.column_labels, data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns, 
data_fields=new_data_fields + self._internal.data_fields, ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def isnull(self) -> "DataFrame": """ Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- DataFrame.notnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ return self._apply_series_op(lambda psser: psser.isnull()) isna = isnull def notnull(self) -> "DataFrame": """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- DataFrame.isnull Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ return self._apply_series_op(lambda psser: psser.notnull()) notna = notnull def insert( self, loc: int, column: Name, value: Union[Scalar, "Series", Iterable], allow_duplicates: bool = False, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional Examples -------- >>> psdf = ps.DataFrame([1, 2, 3]) >>> psdf.sort_index() 0 0 1 1 2 2 3 >>> psdf.insert(0, 'x', 4) >>> psdf.sort_index() x 0 0 4 1 1 4 2 2 4 3 >>> from pyspark.pandas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> psdf.insert(1, 'y', [5, 6, 7]) >>> psdf.sort_index() x y 0 0 4 5 1 1 4 6 2 2 4 7 3 >>> psdf.insert(2, 'z', ps.Series([8, 9, 10])) >>> psdf.sort_index() x y z 0 0 4 5 8 1 1 4 6 9 2 2 4 7 10 3 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(loc, int): raise TypeError("loc must be int") assert 0 <= loc <= len(self.columns) assert allow_duplicates is False if not is_name_like_value(column): raise TypeError( '"column" should be a scalar value or tuple that contains scalar values' ) # TODO(SPARK-37723): Support tuple for non-MultiIndex column name. if is_name_like_tuple(column): if self._internal.column_labels_level > 1: if len(column) != len(self.columns.levels): # type: ignore[attr-defined] # To be consistent with pandas raise ValueError('"column" must have length equal to number of column levels.') else: raise NotImplementedError( "Assigning column name as tuple is only supported for MultiIndex columns " "for now." 
                )
        if column in self.columns:
            raise ValueError("cannot insert %s, already exists" % str(column))
        psdf = self.copy()
        psdf[column] = value
        columns = psdf.columns[:-1].insert(loc, psdf.columns[-1])
        psdf = psdf[columns]
        self._update_internal_frame(psdf._internal)

    # TODO: add freq and axis parameter
    def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> "DataFrame":
        """
        Shift DataFrame by desired number of periods.

        .. note:: the current implementation of shift uses Spark's Window without
            specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.

        Parameters
        ----------
        periods : int
            Number of periods to shift. Can be positive or negative.
        fill_value : object, optional
            The scalar value to use for newly introduced missing values.
            The default depends on the dtype of self. For numeric data, np.nan is used.

        Returns
        -------
        Copy of input DataFrame, shifted.

        Examples
        --------
        >>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
        ...                    'Col2': [13, 23, 18, 33, 48],
        ...                    'Col3': [17, 27, 22, 37, 52]},
        ...                   columns=['Col1', 'Col2', 'Col3'])

        >>> df.shift(periods=3)
           Col1  Col2  Col3
        0   NaN   NaN   NaN
        1   NaN   NaN   NaN
        2   NaN   NaN   NaN
        3  10.0  13.0  17.0
        4  20.0  23.0  27.0

        >>> df.shift(periods=3, fill_value=0)
           Col1  Col2  Col3
        0     0     0     0
        1     0     0     0
        2     0     0     0
        3    10    13    17
        4    20    23    27

        """
        return self._apply_series_op(
            lambda psser: psser._shift(periods, fill_value), should_resolve=True
        )

    # TODO: axis should support 1 or 'columns' either at this moment
    def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame":
        """
        First discrete difference of element.

        Calculates the difference of a DataFrame element compared with another
        element in the DataFrame (default is the element in the same column of
        the previous row).

        .. note:: the current implementation of diff uses Spark's Window without
            specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative values.
        axis : int, default 0 or 'index'
            Can only be set to 0 at the moment.

        Returns
        -------
        diffed : DataFrame

        Examples
        --------
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
        ...                    'b': [1, 1, 2, 3, 5, 8],
        ...                    'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
        >>> df
           a  b   c
        0  1  1   1
        1  2  1   4
        2  3  2   9
        3  4  3  16
        4  5  5  25
        5  6  8  36

        >>> df.diff()
             a    b     c
        0  NaN  NaN   NaN
        1  1.0  0.0   3.0
        2  1.0  1.0   5.0
        3  1.0  1.0   7.0
        4  1.0  2.0   9.0
        5  1.0  3.0  11.0

        Difference with 3rd previous row

        >>> df.diff(periods=3)
             a    b     c
        0  NaN  NaN   NaN
        1  NaN  NaN   NaN
        2  NaN  NaN   NaN
        3  3.0  2.0  15.0
        4  3.0  4.0  21.0
        5  3.0  6.0  27.0

        Difference with following row

        >>> df.diff(periods=-1)
             a    b     c
        0 -1.0  0.0  -3.0
        1 -1.0 -1.0  -5.0
        2 -1.0 -1.0  -7.0
        3 -1.0 -2.0  -9.0
        4 -1.0 -3.0 -11.0
        5  NaN  NaN   NaN
        """
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')
        return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True)

    # TODO: axis should support 1 or 'columns' either at this moment
    def nunique(
        self,
        axis: Axis = 0,
        dropna: bool = True,
        approx: bool = False,
        rsd: float = 0.05,
    ) -> "Series":
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        axis : int, default 0 or 'index'
            Can only be set to 0 at the moment.
dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to pandas-on-Spark and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to pandas-on-Spark. Returns ------- The number of unique values per column as a pandas-on-Spark Series. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 dtype: int64 """ from pyspark.pandas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select( [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + [ self._psser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] ) # The data is expected to be small so it's fine to transpose/use default index. with ps.option_context("compute.max_rows", 1): internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], index_names=[None], index_fields=[None], data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], data_fields=None, ) return first_series(DataFrame(internal).transpose()) def round(self, decimals: Union[int, Dict[Name, int], "Series"] = 0) -> "DataFrame": """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... 
index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49 """ if isinstance(decimals, ps.Series): decimals_dict = { k if isinstance(k, tuple) else (k,): v for k, v in decimals._to_internal_pandas().items() } elif isinstance(decimals, dict): decimals_dict = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()} elif isinstance(decimals, int): decimals_dict = {k: decimals for k in self._internal.column_labels} else: raise TypeError("decimals must be an integer, a dict-like or a Series") def op(psser: ps.Series) -> Union[ps.Series, Column]: label = psser._column_label if label in decimals_dict: return F.round(psser.spark.column, decimals_dict[label]) else: return psser return self._apply_series_op(op) def _mark_duplicates( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", ) -> Tuple[SparkDataFrame, str]: if subset is None: subset_list = self._internal.column_labels else: if is_name_like_tuple(subset): subset_list = [cast(Label, subset)] elif is_name_like_value(subset): subset_list = [(subset,)] else: subset_list = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset] diff = set(subset_list).difference(set(self._internal.column_labels)) if len(diff) > 0: raise KeyError(", ".join([name_like_string(d) for d in diff])) group_cols = [self._internal.spark_column_name_for(label) for label in subset_list] sdf = self._internal.resolved_copy.spark_frame column = verify_temp_column_name(sdf, "__duplicated__") if keep == "first" or keep == "last": if keep == "first": ord_func = F.asc else: ord_func = F.desc window = ( Window.partitionBy(*group_cols) .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) sdf = sdf.withColumn(column, F.row_number().over(window) > 1) elif not keep: window = Window.partitionBy(*group_cols).rowsBetween( Window.unboundedPreceding, Window.unboundedFollowing ) sdf = sdf.withColumn(column, F.count("*").over(window) > 1) else: raise ValueError("'keep' only supports 'first', 'last' and False") return sdf, column def duplicated( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", ) -> "Series": """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False dtype: bool Mark duplicates as ``True`` except for the last occurrence. 

        >>> df.duplicated(keep='last').sort_index()
        0     True
        1     True
        2    False
        3    False
        dtype: bool

        Mark all duplicates as ``True``.

        >>> df.duplicated(keep=False).sort_index()
        0     True
        1     True
        2     True
        3    False
        dtype: bool
        """
        from pyspark.pandas.series import first_series

        sdf, column = self._mark_duplicates(subset, keep)

        sdf = sdf.select(
            self._internal.index_spark_columns
            + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]
        )
        return first_series(
            DataFrame(
                InternalFrame(
                    spark_frame=sdf,
                    index_spark_columns=[
                        scol_for(sdf, col) for col in self._internal.index_spark_column_names
                    ],
                    index_names=self._internal.index_names,
                    index_fields=self._internal.index_fields,
                    column_labels=[None],
                    data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],
                )
            )
        )

    # TODO: support other as DataFrame or array-like
    def dot(self, other: "Series") -> "Series":
        """
        Compute the matrix multiplication between the DataFrame and other.

        This method computes the matrix product between the DataFrame and the
        values of another Series.

        It can also be called using ``self @ other`` in Python >= 3.5.

        .. note:: This method is based on an expensive operation due to the nature
            of big data. Internally it needs to generate each row for each value, and
            then group twice - it is a huge operation. To prevent misuse, this method
            has the 'compute.max_rows' default limit of input length, and raises a ValueError.

                >>> from pyspark.pandas.config import option_context
                >>> with option_context(
                ...     'compute.max_rows', 1000, "compute.ops_on_diff_frames", True
                ... ):  # doctest: +NORMALIZE_WHITESPACE
                ...     psdf = ps.DataFrame({'a': range(1001)})
                ...     psser = ps.Series([2], index=['a'])
                ...     psdf.dot(psser)
                Traceback (most recent call last):
                  ...
                ValueError: Current DataFrame has more then the given limit 1000 rows.
                Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
                to retrieve to retrieve more than 1000 rows. Note that, before changing the
                'compute.max_rows', this operation is considerably expensive.

        Parameters
        ----------
        other : Series
            The other object to compute the matrix product with.

        Returns
        -------
        Series
            Return the matrix product between self and other as a Series.

        See Also
        --------
        Series.dot: Similar method for Series.

        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order to
        compute the matrix multiplication. In addition, the column names of
        DataFrame and the index of other must contain the same values, as they
        will be aligned prior to the multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        >>> from pyspark.pandas.config import set_option, reset_option
        >>> set_option("compute.ops_on_diff_frames", True)
        >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> psser = ps.Series([1, 1, 2, 1])
        >>> psdf.dot(psser)
        0   -4
        1    5
        dtype: int64

        Note how shuffling of the objects does not change the result.

        >>> psser2 = psser.reindex([1, 0, 2, 3])
        >>> psdf.dot(psser2)
        0   -4
        1    5
        dtype: int64
        >>> psdf @ psser2
        0   -4
        1    5
        dtype: int64
        >>> reset_option("compute.ops_on_diff_frames")
        """
        if not isinstance(other, ps.Series):
            raise TypeError("Unsupported type {}".format(type(other).__name__))
        else:
            return cast(ps.Series, other.dot(self.transpose())).rename(None)

    def __matmul__(self, other: "Series") -> "Series":
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
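
        Examples
        --------
        A minimal illustration, mirroring the example in :meth:`DataFrame.dot`;
        it is marked ``# doctest: +SKIP`` only because "compute.ops_on_diff_frames"
        has to be enabled when the operands are backed by different DataFrames.

        >>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> psser = ps.Series([1, 1, 2, 1])
        >>> psdf @ psser  # doctest: +SKIP
        0   -4
        1    5
        dtype: int64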
""" return self.dot(other) def to_table( self, name: str, format: Optional[str] = None, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: Any, ) -> None: if index_col is None: log_advice( "If `index_col` is not specified for `to_table`, " "the existing index is lost when converting to table." ) mode = validate_mode(mode) return self.spark.to_table(name, format, mode, partition_cols, index_col, **options) to_table.__doc__ = SparkFrameMethods.to_table.__doc__ def to_delta( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """ Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, ... partition_cols=['date', 'country']) # doctest: +SKIP Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') # doctest: +SKIP """ if index_col is None: log_advice( "If `index_col` is not specified for `to_delta`, " "the existing index is lost when converting to Delta." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore[assignment] mode = validate_mode(mode) self.spark.to_spark_io( path=path, mode=mode, format="delta", partition_cols=partition_cols, index_col=index_col, **options, ) def to_parquet( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, compression: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, **options: Any, ) -> None: """ Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. 
- 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if index_col is None: log_advice( "If `index_col` is not specified for `to_parquet`, " "the existing index is lost when converting to Parquet." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") mode = validate_mode(mode) builder = self.to_spark(index_col=index_col).write.mode(mode) if partition_cols is not None: builder.partitionBy(partition_cols) if compression is not None: builder.option("compression", compression) builder.options(**options).format("parquet").save(path) def to_orc( self, path: str, mode: str = "w", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """ Write the DataFrame out as a ORC file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append' (equivalent to 'a'): Append the new data to existing data. - 'overwrite' (equivalent to 'w'): Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent pandas-on-Spark's index. The index name in pandas-on-Spark is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_orc DataFrame.to_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ps.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date') >>> df.to_orc( ... '%s/to_orc/foo.orc' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if index_col is None: log_advice( "If `index_col` is not specified for `to_orc`, " "the existing index is lost when converting to ORC." ) if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore[assignment] mode = validate_mode(mode) self.spark.to_spark_io( path=path, mode=mode, format="orc", partition_cols=partition_cols, index_col=index_col, **options, ) def to_spark_io( self, path: Optional[str] = None, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options: "OptionalPrimitiveType", ) -> None: """An alias for :func:`DataFrame.spark.to_spark_io`. See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`. .. deprecated:: 3.2.0 Use :func:`DataFrame.spark.to_spark_io` instead. """ warnings.warn("Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.", FutureWarning) return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options) to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__ def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame: if index_col is None: log_advice( "If `index_col` is not specified for `to_spark`, " "the existing index is lost when converting to Spark DataFrame." ) return self._to_spark(index_col) to_spark.__doc__ = SparkFrameMethods.__doc__ def _to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame: """ Same as `to_spark()`, without issueing the advice log when `index_col` is not specified for internal usage. """ return self.spark.frame(index_col) def to_pandas(self) -> pd.DataFrame: """ Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ log_advice( "`to_pandas` loads all data into the driver's memory. " "It should only be used if the resulting pandas DataFrame is expected to be small." ) return self._to_pandas() def _to_pandas(self) -> pd.DataFrame: """ Same as `to_pandas()`, without issueing the advice log for internal usage. """ return self._internal.to_pandas_frame.copy() def assign(self, **kwargs: Any) -> "DataFrame": """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable, Series or Index} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas-on-Spark doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. 
Examples -------- >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15, ... temp_idx=df.index) >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']] temp_c temp_f temp_k temp_idx Portland 17.0 62.6 290.15 Portland Berkeley 25.0 77.0 298.15 Berkeley Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in pandas-on-Spark. In pandas-on-Spark, all items are computed first, and then assigned. """ return self._assign(kwargs) def _assign(self, kwargs: Any) -> "DataFrame": assert isinstance(kwargs, dict) from pyspark.pandas.indexes import MultiIndex from pyspark.pandas.series import IndexOpsMixin for k, v in kwargs.items(): is_invalid_assignee = ( not (isinstance(v, (IndexOpsMixin, Column)) or callable(v) or is_scalar(v)) ) or isinstance(v, MultiIndex) if is_invalid_assignee: raise TypeError( "Column assignment doesn't support type " "{0}".format(type(v).__name__) ) if callable(v): kwargs[k] = v(self) pairs = { (k if is_name_like_tuple(k) else (k,)): ( (v.spark.column, v._internal.data_fields[0]) if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex) else (v, None) if isinstance(v, Column) else (SF.lit(v), None) ) for k, v in kwargs.items() } scols = [] data_fields = [] for label in self._internal.column_labels: for i in range(len(label)): if label[: len(label) - i] in pairs: scol, field = pairs[label[: len(label) - i]] name = self._internal.spark_column_name_for(label) scol = scol.alias(name) if field is not None: field = field.copy(name=name) break else: scol = self._internal.spark_column_for(label) field = self._internal.field_for(label) scols.append(scol) data_fields.append(field) column_labels = self._internal.column_labels.copy() for label, (scol, field) in pairs.items(): if label not in set(i[: len(label)] for i in self._internal.column_labels): name = name_like_string(label) scols.append(scol.alias(name)) if field is not None: field = field.copy(name=name) data_fields.append(field) column_labels.append(label) level = self._internal.column_labels_level column_labels = [ tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels ] internal = self._internal.with_new_columns( scols, column_labels=column_labels, data_fields=data_fields ) return DataFrame(internal) @staticmethod def from_records( data: Union[np.ndarray, List[tuple], dict, pd.DataFrame], index: Union[str, list, np.ndarray] = None, exclude: list = None, columns: list = None, coerce_float: bool = False, nrows: int = None, ) -> "DataFrame": """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. 
If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ps.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ps.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ps.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ return DataFrame( pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows) ) def to_records( self, index: bool = True, column_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None, index_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None, ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() psdf = self return validate_arguments_and_invoke_function( psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args ) def copy(self, deep: bool = True) -> "DataFrame": """ Make a copy of this object's indices and data. 
Parameters ---------- deep : bool, default True this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8 """ return DataFrame(self._internal) def dropna( self, axis: Axis = 0, how: str = "any", thresh: Optional[int] = None, subset: Optional[Union[Name, List[Name]]] = None, inplace: bool = False, ) -> Optional["DataFrame"]: """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ps.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if thresh is None: if how is None: raise TypeError("must specify how or thresh") elif how not in ("any", "all"): raise ValueError("invalid how option: {h}".format(h=how)) labels: Optional[List[Label]] if subset is not None: if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] else: labels = None if axis == 0: if labels is not None: invalids = [label for label in labels if label not in self._internal.column_labels] if len(invalids) > 0: raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce( lambda x, y: x + y, [ F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0) for label in labels ], SF.lit(0), ) if thresh is not None: pred = cnt >= SF.lit(int(thresh)) elif how == "any": pred = cnt == SF.lit(len(labels)) elif how == "all": pred = cnt > SF.lit(0) internal = self._internal.with_filter(pred) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) else: assert axis == 1 internal = self._internal.resolved_copy if labels is not None: if any(len(lbl) != internal.index_level for lbl in labels): raise ValueError( "The length of each subset must be the same as the index size." ) cond = reduce( lambda x, y: x | y, [ reduce( lambda x, y: x & y, [ scol == SF.lit(part) for part, scol in zip(lbl, internal.index_spark_columns) ], ) for lbl in labels ], ) internal = internal.with_filter(cond) psdf: DataFrame = DataFrame(internal) null_counts = [] for label in internal.column_labels: psser = psdf._psser_for(label) cond = psser.isnull().spark.column null_counts.append( F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label)) ) counts = internal.spark_frame.select(null_counts + [F.count("*")]).head() if thresh is not None: column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) >= int(thresh) ] elif how == "any": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) == counts[-1] ] elif how == "all": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0 ] psdf = self[column_labels] if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf # TODO: add 'limit' when value parameter exists def fillna( self, value: Optional[Union[Any, Dict[Name, Any]]] = None, method: Optional[str] = None, axis: Optional[Axis] = None, inplace: bool = False, limit: Optional[int] = None, ) -> Optional["DataFrame"]: """Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. 
inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ps.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is not None: if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value).__name__) if limit is not None: raise ValueError("limit parameter for value is not support now") if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v).__name__) value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()} def op(psser: ps.Series) -> ps.Series: label = psser._column_label for k, v in value.items(): if k == label[: len(k)]: return psser._fillna( value=value[k], method=method, axis=axis, limit=limit ) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) elif method is not None: def op(psser: ps.Series) -> ps.Series: return psser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") psdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, "inplace") if inplace: self._update_internal_frame(psdf._internal, check_same_anchor=False) return None else: return psdf def interpolate( self, method: str = "linear", limit: Optional[int] = None, limit_direction: Optional[str] = None, limit_area: Optional[str] = None, ) -> "DataFrame": if method not in ["linear"]: raise NotImplementedError("interpolate currently works only for method='linear'") if (limit is not None) and (not limit > 0): raise ValueError("limit must be > 0.") if (limit_direction is not None) and ( limit_direction not in ["forward", "backward", "both"] ): raise ValueError("invalid limit_direction: '{}'".format(limit_direction)) if (limit_area is not None) and (limit_area not in ["inside", "outside"]): raise ValueError("invalid limit_area: '{}'".format(limit_area)) numeric_col_names = [] for label in self._internal.column_labels: psser = self._psser_for(label) if 
isinstance(psser.spark.data_type, (NumericType, BooleanType)): numeric_col_names.append(psser.name) psdf = self[numeric_col_names] return psdf._apply_series_op( lambda psser: psser._interpolate( method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area ), should_resolve=True, ) def replace( self, to_replace: Optional[Union[Any, List, Tuple, Dict]] = None, value: Optional[Any] = None, inplace: bool = False, limit: Optional[int] = None, regex: bool = False, method: str = "pad", ) -> Optional["DataFrame"]: """ Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list, tuple or dict Value to be replaced. value : int, float, string, list or tuple Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list or tuple, value should be of the same length with to_replace. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ps.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... columns=['name', 'weapon']) >>> df name weapon 0 Ironman Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash Scalar `to_replace` and `value` >>> df.replace('Ironman', 'War-Machine') name weapon 0 War-Machine Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash List like `to_replace` and `value` >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True) >>> df name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Mjolnir 3 Hulk Smash Dicts can be used to specify different replacement values for different existing values To use a dict in this way the value parameter should be None >>> df.replace({'Mjolnir': 'Stormbuster'}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Dict can specify that different values should be replaced in different columns The value parameter should not be None in this case >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster') name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Nested dictionaries The value parameter should be None to use a nested dict in this way >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash """ if method != "pad": raise NotImplementedError("replace currently works only for method='pad") if limit is not None: raise NotImplementedError("replace currently works only when limit=None") if regex is not False: raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, "inplace") if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)): raise TypeError("Unsupported type {}".format(type(value).__name__)) if to_replace is not None and not isinstance( to_replace, (int, float, str, list, tuple, dict) ): raise TypeError("Unsupported type {}".format(type(to_replace).__name__)) if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)): if len(value) != len(to_replace): raise ValueError("Length of to_replace and value must be same") if isinstance(to_replace, dict) and ( value is not None or all(isinstance(i, dict) for i in to_replace.values()) ): to_replace_dict = to_replace def op(psser: ps.Series) -> ps.Series: if psser.name in to_replace_dict: return psser.replace( to_replace=to_replace_dict[psser.name], 
value=value, regex=regex ) else: return psser else: def op(psser: ps.Series) -> ps.Series: return psser.replace(to_replace=to_replace, value=value, regex=regex) psdf = self._apply_series_op(op) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame": """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. Examples -------- >>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. """ if is_list_like(lower) or is_list_like(upper): raise TypeError( "List-like value are not supported for 'lower' and 'upper' at the " + "moment" ) if lower is None and upper is None: return self return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper)) def head(self, n: int = 5) -> "DataFrame": """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ if n < 0: n = len(self) + n if n <= 0: return DataFrame(self._internal.with_filter(SF.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option("compute.ordered_head"): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n))) def last(self, offset: Union[str, DateOffset]) -> "DataFrame": """ Select final periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the last few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- DataFrame A subset of the caller. 
Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ # Check index type should be format DateTime if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert offset_ is not None from_date = cast(datetime.datetime, self.index.max()) - offset_ # type: ignore[operator] return cast(DataFrame, self.loc[from_date:]) def first(self, offset: Union[str, DateOffset]) -> "DataFrame": """ Select first periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the first 3 days. Returns ------- DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` Examples -------- >>> index = pd.date_range('2018-04-09', periods=4, freq='2D') >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index) >>> psdf A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> psdf.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 observed days in the dataset, and therefore data for 2018-04-13 was not returned. """ # Check index type should be format DatetimeIndex if not isinstance(self.index, ps.DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex") offset_: Optional[DateOffset] = to_offset(offset) assert offset_ is not None to_date = cast(datetime.datetime, self.index.min()) + offset_ # type: ignore[operator] return cast(DataFrame, self.loc[:to_date]) # type: ignore[misc] def pivot_table( self, values: Optional[Union[Name, List[Name]]] = None, index: Optional[List[Name]] = None, columns: Optional[Name] = None, aggfunc: Union[str, Dict[Name, str]] = "mean", fill_value: Optional[Any] = None, ) -> "DataFrame": """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... 
"small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. >>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13 """ if not is_name_like_value(columns): raise TypeError("columns should be one column name.") if not is_name_like_value(values) and not ( isinstance(values, list) and all(is_name_like_value(v) for v in values) ): raise TypeError("values should be one column or list of columns.") if not isinstance(aggfunc, str) and ( not isinstance(aggfunc, dict) or not all( is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items() ) ): raise TypeError( "aggfunc must be a dict mapping from column name " "to aggregate functions (string)." ) if isinstance(aggfunc, dict) and index is None: raise NotImplementedError( "pivot_table doesn't support aggfunc" " as dict and without index." 
) if isinstance(values, list) and index is None: raise NotImplementedError("values can't be a list without index.") if columns not in self.columns: raise ValueError("Wrong columns {}.".format(name_like_string(columns))) if not is_name_like_tuple(columns): columns = (columns,) if isinstance(values, list): values = [col if is_name_like_tuple(col) else (col,) for col in values] if not all( isinstance(self._internal.spark_type_for(col), NumericType) for col in values ): raise TypeError("values should be a numeric type.") else: values = values if is_name_like_tuple(values) else (values,) if not isinstance(self._internal.spark_type_for(values), NumericType): raise TypeError("values should be a numeric type.") if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(value), aggfunc ) ) for value in values ] else: agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(values), aggfunc ) ) ] elif isinstance(aggfunc, dict): aggfunc = { key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items() } agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value) ) for key, value in aggfunc.items() ] agg_columns = [key for key, _ in aggfunc.items()] if set(agg_columns) != set(values): raise ValueError("Columns in aggfunc must be the same as values.") sdf = self._internal.resolved_copy.spark_frame if index is None: sdf = ( sdf.groupBy() .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) elif isinstance(index, list): index = [label if is_name_like_tuple(label) else (label,) for label in index] sdf = ( sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]) .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) else: raise TypeError("index should be a None or a list of columns.") if fill_value is not None and isinstance(fill_value, (int, float)): sdf = sdf.fillna(fill_value) psdf: DataFrame if index is not None: index_columns = [self._internal.spark_column_name_for(label) for label in index] index_fields = [self._internal.field_for(label) for label in index] if isinstance(values, list): data_columns = [column for column in sdf.columns if column not in index_columns] if len(values) > 1: # If we have two values, Spark will return column's name # in this format: column_values, where column contains # their values in the DataFrame and values is # the column list passed to the pivot_table(). # E.g. if column is b and values is ['b','e'], # then ['2_b', '2_e', '3_b', '3_e']. # We sort the columns of Spark DataFrame by values. 
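                    # Illustrative sketch using the hypothetical names from the
                    # comment above: ['2_b', '2_e', '3_b', '3_e'] sorted by the
                    # suffix after the first '_' (a stable sort) becomes
                    # ['2_b', '3_b', '2_e', '3_e'], i.e. all columns for value 'b'
                    # first, then all columns for value 'e'.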
data_columns.sort(key=lambda x: x.split("_", 1)[1]) sdf = sdf.select(index_columns + data_columns) column_name_to_index = dict( zip(self._internal.data_spark_column_names, self._internal.column_labels) ) column_labels = [ tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]]) for name in data_columns ] column_label_names = ( [cast(Optional[Name], None)] * column_labels_level(values) ) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf = DataFrame(internal) else: column_labels = [tuple(list(values[0]) + [column]) for column in data_columns] column_label_names = ([cast(Optional[Name], None)] * len(values[0])) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf = DataFrame(internal) else: internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_fields=index_fields, column_label_names=[columns], ) psdf = DataFrame(internal) else: index_values = values index_map: Dict[str, Optional[Label]] = {} for i, index_value in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, SF.lit(index_value)) index_map[colname] = None internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()], index_names=list(index_map.values()), column_label_names=[columns], ) psdf = DataFrame(internal) psdf_columns = psdf.columns if isinstance(psdf_columns, pd.MultiIndex): psdf.columns = psdf_columns.set_levels( psdf_columns.levels[-1].astype( # type: ignore[index] spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type) ), level=-1, ) else: psdf.columns = psdf_columns.astype( spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type) ) return psdf def pivot( self, index: Optional[Name] = None, columns: Optional[Name] = None, values: Optional[Name] = None, ) -> "DataFrame": """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... 
# doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, pandas-on-Spark's pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ps.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0 """ if columns is None: raise ValueError("columns should be set.") if values is None: raise ValueError("values should be set.") should_use_existing_index = index is not None if should_use_existing_index: df = self index_labels = [index] else: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index_labels = df._internal.column_labels[: self._internal.index_level] df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc="first") if should_use_existing_index: return df else: internal = df._internal.copy(index_names=self._internal.index_names) return DataFrame(internal) @property def columns(self) -> pd.Index: """The column labels of the DataFrame.""" names = [ name if name is None or len(name) > 1 else name[0] for name in self._internal.column_label_names ] if self._internal.column_labels_level > 1: columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0]) return columns @columns.setter def columns(self, columns: Union[pd.Index, List[Name]]) -> None: if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() else: column_labels = [ col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns ] if len(self._internal.column_labels) != len(column_labels): raise ValueError( "Length mismatch: Expected axis has {} elements, " "new values have {} elements".format( len(self._internal.column_labels), len(column_labels) ) ) column_label_names: Optional[List] if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] else: column_label_names = None pssers = [ self._psser_for(label).rename(name) for label, name in zip(self._internal.column_labels, column_labels) ] self._update_internal_frame( self._internal.with_new_columns(pssers, column_label_names=column_label_names) ) @property def dtypes(self) -> pd.Series: """Return the dtypes in the DataFrame. 
This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ps.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series( [self._psser_for(label).dtype for label in self._internal.column_labels], index=pd.Index( [label if len(label) > 1 else label[0] for label in self._internal.column_labels] ), ) def select_dtypes( self, include: Optional[Union[str, List[str]]] = None, exclude: Optional[Union[str, List[str]]] = None, ) -> "DataFrame": """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... ValueError: include and exclude overlap on {'a'} Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ps.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(include=['int'], exclude=['float64']) a 0 1 1 2 2 1 3 2 4 1 5 2 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 """ from pyspark.sql.types import _parse_datatype_string include_list: List[str] if not is_list_like(include): include_list = [cast(str, include)] if include is not None else [] else: include_list = list(include) exclude_list: List[str] if not is_list_like(exclude): exclude_list = [cast(str, exclude)] if exclude is not None else [] else: exclude_list = list(exclude) if not any((include_list, exclude_list)): raise ValueError("at least one of include or exclude must be " "nonempty") # can't both include AND exclude! 
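        # Illustrative sketch: a call such as select_dtypes(include=['int'], exclude=['int'])
        # overlaps on {'int'} and is rejected below, mirroring the ValueError shown in the
        # docstring example.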
if set(include_list).intersection(set(exclude_list)): raise ValueError( "include and exclude overlap on {inc_ex}".format( inc_ex=set(include_list).intersection(set(exclude_list)) ) ) # Handle Spark types include_spark_type = [] for inc in include_list: try: include_spark_type.append(_parse_datatype_string(inc)) except BaseException: pass exclude_spark_type = [] for exc in exclude_list: try: exclude_spark_type.append(_parse_datatype_string(exc)) except BaseException: pass # Handle pandas types include_numpy_type = [] for inc in include_list: try: include_numpy_type.append(infer_dtype_from_object(inc)) except BaseException: pass exclude_numpy_type = [] for exc in exclude_list: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except BaseException: pass column_labels = [] for label in self._internal.column_labels: if len(include_list) > 0: should_include = ( infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type or self._internal.spark_type_for(label) in include_spark_type ) else: should_include = not ( infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type or self._internal.spark_type_for(label) in exclude_spark_type ) if should_include: column_labels.append(label) return DataFrame( self._internal.with_new_columns([self._psser_for(label) for label in column_labels]) ) def droplevel( self, level: Union[int, Name, List[Union[int, Name]]], axis: Axis = 0 ) -> "DataFrame": """ Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFrame with requested index / column level(s) removed. Examples -------- >>> df = ps.DataFrame( ... [[3, 4], [7, 8], [11, 12]], ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]), ... ) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ axis = validate_axis(axis) if axis == 0: if not isinstance(level, (tuple, list)): # huh? 
level = [level] names = self.index.names nlevels = self._internal.index_level int_level = set() for n in level: if isinstance(n, int): if n < 0: n = n + nlevels if n < 0: raise IndexError( "Too many levels: Index has only {} levels, " "{} is not a valid level number".format(nlevels, (n - nlevels)) ) if n >= nlevels: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( nlevels, (n + 1) ) ) else: if n not in names: raise KeyError("Level {} not found".format(n)) n = names.index(n) int_level.add(n) if len(level) >= nlevels: raise ValueError( "Cannot remove {} levels from an index with {} levels: " "at least one level must be left.".format(len(level), nlevels) ) index_spark_columns, index_names, index_fields = zip( *[ item for i, item in enumerate( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields, ) ) if i not in int_level ] ) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields), ) return DataFrame(internal) else: psdf = self.copy() psdf.columns = psdf.columns.droplevel(level) # type: ignore[arg-type] return psdf def drop( self, labels: Optional[Union[Name, List[Name]]] = None, axis: Optional[Axis] = 0, index: Union[Name, List[Name]] = None, columns: Union[Name, List[Name]] = None, ) -> "DataFrame": """ Drop specified labels from columns. Remove rows and/or columns by specifying label names and corresponding axis, or by specifying directly index and/or column names. Drop rows of a MultiIndex DataFrame is not supported yet. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionchanged:: 3.3 Set dropping by index by default. index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is quivalent to ``index=columns``). .. versionchanged:: 3.3 Added dropping rows by 'index'. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 >>> df.drop(index=[0, 1], columns='A') B C D 2 9 10 11 Also support dropping columns for MultiIndex >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently, dropping rows of a MultiIndex DataFrame is not supported yet. 
""" if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis = validate_axis(axis) if axis == 1: return self.drop(index=index, columns=labels) else: return self.drop(index=labels, columns=columns) else: if index is None and columns is None: raise ValueError("Need to specify at least one of 'labels' or 'columns' or 'index'") internal = self._internal if index is not None: if is_name_like_tuple(index) or is_name_like_value(index): index = [index] if len(index) > 0: if internal.index_level == 1: internal = internal.resolved_copy if len(index) <= ps.get_option("compute.isin_limit"): self_index_type = self.index.spark.data_type cond = ~internal.index_spark_columns[0].isin( [SF.lit(label).cast(self_index_type) for label in index] ) internal = internal.with_filter(cond) else: index_sdf_col = "__index" index_sdf = default_session().createDataFrame( pd.DataFrame({index_sdf_col: index}) ) joined_sdf = internal.spark_frame.join( other=F.broadcast(index_sdf), on=( internal.index_spark_columns[0] == scol_for(index_sdf, index_sdf_col) ), how="anti", ) internal = internal.with_new_sdf(joined_sdf) else: raise NotImplementedError( "Drop rows of MultiIndex DataFrame is not supported yet" ) if columns is not None: if is_name_like_tuple(columns): columns = [columns] elif is_name_like_value(columns): columns = [(columns,)] else: columns = [col if is_name_like_tuple(col) else (col,) for col in columns] if len(columns) > 0: drop_column_labels = set( label for label in internal.column_labels for col in columns if label[: len(col)] == col ) if len(drop_column_labels) == 0: raise KeyError(columns) keep_columns_and_labels = [ (column, label) for column, label in zip( self._internal.data_spark_column_names, self._internal.column_labels ) if label not in drop_column_labels ] cols, labels = ( zip(*keep_columns_and_labels) if len(keep_columns_and_labels) > 0 else ([], []) ) internal = internal.with_new_columns( [self._psser_for(label) for label in labels] ) return DataFrame(internal) def _prepare_sort_by_scols(self, by: Union[Name, List[Name]]) -> List[Column]: if is_name_like_value(by): by = [by] else: assert is_list_like(by), type(by) new_by = [] for colname in by: ser = self[colname] if not isinstance(ser, ps.Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." 
% name_like_string(colname) ) new_by.append(ser.spark.column) return new_by def _sort( self, by: List[Column], ascending: Union[bool, List[bool]], na_position: str, keep: str = "first", ) -> "DataFrame": if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError( "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by)) ) if na_position not in ("first", "last"): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination mapper = { (True, "first"): Column.asc_nulls_first, (True, "last"): Column.asc_nulls_last, (False, "first"): Column.desc_nulls_first, (False, "last"): Column.desc_nulls_last, } by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)] natural_order_scol = F.col(NATURAL_ORDER_COLUMN_NAME) if keep == "last": natural_order_scol = Column.desc(natural_order_scol) elif keep == "all": raise NotImplementedError("`keep`=all is not implemented yet.") elif keep != "first": raise ValueError('keep must be either "first", "last" or "all".') sdf = self._internal.resolved_copy.spark_frame.sort(*by, natural_order_scol) return DataFrame(self._internal.with_new_sdf(sdf)) def sort_values( self, by: Union[Name, List[Name]], ascending: Union[bool, List[bool]] = True, inplace: bool = False, na_position: str = "last", ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3'], ... index=['a', 'b', 'c', 'd', 'e']) >>> df col1 col2 col3 a A 2 0 b B 9 9 c None 8 4 d D 7 2 e C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 a A 2 0 b B 9 9 e C 4 3 d D 7 2 c None 8 4 Ignore index for the resulting axis >>> df.sort_values(by=['col1'], ignore_index=True) col1 col2 col3 0 A 2 0 1 B 9 9 2 C 4 3 3 D 7 2 4 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 d D 7 2 e C 4 3 b B 9 9 a A 2 0 c None 8 4 Sort by multiple columns >>> df = ps.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... 
columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ inplace = validate_bool_kwarg(inplace, "inplace") new_by = self._prepare_sort_by_scols(by) psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def sort_index( self, axis: Axis = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None pandas-on-Spark does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 3.4.0 Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 None NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 None NaN >>> df.sort_index(na_position='first') A None NaN a 1.0 b 2.0 >>> df.sort_index(ignore_index=True) A 0 1.0 1 2.0 2 NaN >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 None NaN >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) A B b 0 1 2 a 0 3 0 b 1 0 3 a 1 2 1 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(ignore_index=True) A B 0 3 0 1 2 1 2 1 2 3 0 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("No other axis than 0 are supported at the moment") if kind is not None: raise NotImplementedError( "Specifying the sorting algorithm is not supported at the moment." ) if level is None or (is_list_like(level) and len(level) == 0): # type: ignore[arg-type] by = self._internal.index_spark_columns elif is_list_like(level): by = [ self._internal.index_spark_columns[lvl] for lvl in level # type: ignore[union-attr] ] else: by = [self._internal.index_spark_columns[level]] # type: ignore[index] psdf = self._sort(by=by, ascending=ascending, na_position=na_position) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def swaplevel( self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, axis: Axis = 0 ) -> "DataFrame": """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise. Returns ------- DataFrame DataFrame with levels swapped in MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_arrays( ... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size']) >>> midx # doctest: +SKIP MultiIndex([( 'red', 1, 's'), ('blue', 2, 'm')], names=['color', 'number', 'size']) Swap levels in a MultiIndex on index. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx) >>> psdf # doctest: +NORMALIZE_WHITESPACE x y color number size red 1 s 5 5 blue 2 m 6 6 >>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 >>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE x y number color size 1 red s 5 5 2 blue m 6 6 >>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 Swap levels in a MultiIndex on columns. >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}) >>> psdf.columns = midx >>> psdf color red blue number 1 2 size s m 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> psdf.swaplevel(0, 1, axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 >>> psdf.swaplevel('number', 'color', axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 """ axis = validate_axis(axis) if axis == 0: internal = self._swaplevel_index(i, j) else: assert axis == 1 internal = self._swaplevel_columns(i, j) return DataFrame(internal) def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "DataFrame": """ Interchange axes and swap values axes appropriately. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from pyspark.pandas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- i: {0 or 'index', 1 or 'columns'}. The axis to swap. j: {0 or 'index', 1 or 'columns'}. The axis to swap. copy : bool, default True. Returns ------- DataFrame Examples -------- >>> psdf = ps.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c'] ... 
) >>> psdf a b c x 1 2 3 y 4 5 6 z 7 8 9 >>> psdf.swapaxes(i=1, j=0) x y z a 1 4 7 b 2 5 8 c 3 6 9 >>> psdf.swapaxes(i=1, j=1) a b c x 1 2 3 y 4 5 6 z 7 8 9 """ assert copy is True i = validate_axis(i) j = validate_axis(j) return self.copy() if i == j else self.transpose() def _swaplevel_columns(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame: assert isinstance(self.columns, pd.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.columns.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.columns.names.index(i) j = j if isinstance(j, int) else self.columns.names.index(j) for index in (i, j): if index >= len(self.columns) or index < -len(self.columns): raise IndexError( "Too many levels: Columns have only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) column_label_names = self._internal.column_label_names.copy() column_label_names[i], column_label_names[j], = ( column_label_names[j], column_label_names[i], ) column_labels = self._internal._column_labels column_label_list = [list(label) for label in column_labels] for label_list in column_label_list: label_list[i], label_list[j] = label_list[j], label_list[i] column_labels = [tuple(x) for x in column_label_list] internal = self._internal.copy( column_label_names=list(column_label_names), column_labels=list(column_labels) ) return internal def _swaplevel_index(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame: assert isinstance(self.index, ps.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.index.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.index.names.index(i) j = j if isinstance(j, int) else self.index.names.index(j) for index in (i, j): if index >= self._internal.index_level or index < -self._internal.index_level: raise IndexError( "Too many levels: Index has only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) index_map = list( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_fields, ) ) index_map[i], index_map[j] = index_map[j], index_map[i] index_spark_columns, index_names, index_fields = zip(*index_map) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_fields=list(index_fields), ) return internal def nlargest( self, n: int, columns: Union[Name, List[Name]], keep: str = "first" ) -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in pandas. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. 
DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "X". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 To order by the largest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 The examples below show how ties are resolved, which is decided by `keep`. >>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 2 c 2 d 3 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nlargest(3, 'X') X d 3 e 3 b 2 >>> tied_df.nlargest(3, 'X', keep='first') X d 3 e 3 b 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nlargest(3, 'X', keep='last') X e 3 d 3 c 2 """ by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=False, na_position="last", keep=keep).head(n=n) def nsmallest( self, n: int, columns: Union[Name, List[Name]], keep: str = "first" ) -> "DataFrame": """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet. Determines which duplicates (if any) to keep. - ``first`` : Keep the first occurrence. - ``last`` : Keep the last occurrence. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "X". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the smallest values in column "Y" and then "X", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 The examples below show how ties are resolved, which is decided by `keep`. 
>>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e']) >>> tied_df X a 1 b 1 c 2 d 2 e 3 When using keep='first' (by default), ties are resolved in order: >>> tied_df.nsmallest(3, 'X') X a 1 b 1 c 2 >>> tied_df.nsmallest(3, 'X', keep='first') X a 1 b 1 c 2 When using keep='last', ties are resolved in reverse order: >>> tied_df.nsmallest(3, 'X', keep='last') X b 1 a 1 d 2 """ by_scols = self._prepare_sort_by_scols(columns) return self._sort(by=by_scols, ascending=True, na_position="last", keep=keep).head(n=n) def isin(self, values: Union[List, Dict]) -> "DataFrame": """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns)) ) data_spark_columns = [] if isinstance(values, dict): for i, col in enumerate(self.columns): if col in values: item = values[col] item = item.tolist() if isinstance(item, np.ndarray) else list(item) scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin( [SF.lit(v) for v in item] ) scol = F.coalesce(scol, F.lit(False)) else: scol = SF.lit(False) data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i])) elif is_list_like(values): values = ( cast(np.ndarray, values).tolist() if isinstance(values, np.ndarray) else list(values) ) for label in self._internal.column_labels: scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values]) scol = F.coalesce(scol, F.lit(False)) data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label))) else: raise TypeError("Values should be iterable, Series, DataFrame or dict.") return DataFrame( self._internal.with_new_columns( data_spark_columns, data_fields=[ field.copy(dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False) for field in self._internal.data_fields ], ) ) @property def shape(self) -> Tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge( self, right: "DataFrame", how: str = "inner", on: Optional[Union[Name, List[Name]]] = None, left_on: Optional[Union[Name, List[Name]]] = None, right_on: Optional[Union[Name, List[Name]]] = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ("_x", "_y"), ) -> "DataFrame": """ Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; not preserve key order unlike pandas. right: use only keys from right frame, similar to a SQL right outer join; not preserve key order unlike pandas. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; not preserve the order of the left keys unlike pandas. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. 
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_psdf = ps.DataFrame({'A': [1, 2]}) >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]: if os is None: return [] elif is_name_like_tuple(os): return [cast(Label, os)] elif is_name_like_value(os): return [(os,)] else: return [o if is_name_like_tuple(o) else (o,) for o in os] if isinstance(right, ps.Series): right = right.to_frame() if on: if left_on or right_on: raise ValueError( 'Can only pass argument "on" OR "left_on" and "right_on", ' "not a combination of both." ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: # TODO: need special handling for multi-index. if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list( map(right._internal.spark_column_name_for, to_list(right_on)) ) if left_key_names and not right_key_names: raise ValueError("Must pass right_on or right_index=True") if right_key_names and not left_key_names: raise ValueError("Must pass left_on or left_index=True") if not left_key_names and not right_key_names: common = list(self.columns.intersection(right.columns)) if len(common) == 0: raise ValueError( "No common columns to perform merge on. Merge options: " "left_on=None, right_on=None, left_index=False, right_index=False" ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if len(left_key_names) != len(right_key_names): raise ValueError("len(left_keys) must equal len(right_keys)") # We should distinguish the name to avoid ambiguous column name after merging. 
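        # Illustration (hypothetical): if both frames carry a join key named "value",
        # the right-hand side is referenced as "__right_value" below so the joined
        # Spark frame never holds two columns with the same name.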
right_prefix = "__right_" right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names] how = validate_how(how) def resolve(internal: InternalFrame, side: str) -> InternalFrame: def rename(col: str) -> str: return "__{}_{}".format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = sdf.select( *[ scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if col not in HIDDEN_COLUMNS ], *HIDDEN_COLUMNS, ) return internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.index_spark_column_names ], index_fields=[ field.copy(name=rename(field.name)) for field in internal.index_fields ], data_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.data_spark_column_names ], data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields], ) left_internal = self._internal.resolved_copy right_internal = resolve(right._internal, "right") left_table = left_internal.spark_frame.alias("left_table") right_table = right_internal.spark_frame.alias("right_table") left_key_columns = [scol_for(left_table, label) for label in left_key_names] right_key_columns = [scol_for(right_table, label) for label in right_key_names] join_condition = reduce( lambda x, y: x & y, [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)], ) joined_table = left_table.join(right_table, join_condition, how=how) # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels) exprs = [] data_columns = [] column_labels = [] def left_scol_for(label: Label) -> Column: return scol_for(left_table, left_internal.spark_column_name_for(label)) def right_scol_for(label: Label) -> Column: return scol_for(right_table, right_internal.spark_column_name_for(label)) for label in left_internal.column_labels: col = left_internal.spark_column_name_for(label) scol = left_scol_for(label) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): right_scol = right_scol_for(label) if how == "right": scol = right_scol.alias(col) elif how == "full": scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = col + left_suffix scol = scol.alias(col) label = tuple([str(label[0]) + left_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right_internal.column_labels: # recover `right_prefix` here. 
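            # e.g. a resolved right-side column "__right_value" (hypothetical name)
            # is sliced back to "value" by dropping the len("__right_") prefix.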
col = right_internal.spark_column_name_for(label)[len(right_prefix) :] scol = right_scol_for(label).alias(col) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): continue else: col = col + right_suffix scol = scol.alias(col) label = tuple([str(label[0]) + right_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = left_internal.index_spark_columns right_index_scols = right_internal.index_spark_columns # Retain indices if they are used for joining if left_index: if right_index: if how in ("inner", "left"): exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names elif how == "right": exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names else: index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names for col, left_scol, right_scol in zip( index_spark_column_names, left_index_scols, right_index_scols ): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) else: exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names elif right_index: exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names else: index_spark_column_names = [] index_names = [] selected_columns = joined_table.select(*exprs) internal = InternalFrame( spark_frame=selected_columns, index_spark_columns=[ scol_for(selected_columns, col) for col in index_spark_column_names ], index_names=index_names, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns], ) return DataFrame(internal) def join( self, right: "DataFrame", on: Optional[Union[Name, List[Name]]] = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", ) -> "DataFrame": """ Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. 
See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... columns=['key', 'B']) >>> psdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> psdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right') >>> join_psdf.sort_values(by=join_psdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key')) >>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method not preserve the original DataFrame’s index in the result unlike pandas. >>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key') >>> join_psdf.index Int64Index([0, 1, 2, 3], dtype='int64') """ if isinstance(right, ps.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if len(common) > 0 and not lsuffix and not rsuffix: raise ValueError( "columns overlap but no suffix specified: " "{rename}".format(rename=common) ) need_set_index = False if on: if not is_list_like(on): on = [on] if len(on) != right._internal.index_level: raise ValueError( 'len(left_on) must equal the number of levels in the index of "right"' ) need_set_index = len(set(on) & set(self.index.names)) == 0 if need_set_index: self = self.set_index(on) join_psdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ) return join_psdf.reset_index() if need_set_index else join_psdf def combine_first(self, other: "DataFrame") -> "DataFrame": """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. .. versionadded:: 3.3.0 Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. 
Returns ------- DataFrame Examples -------- >>> ps.set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2).sort_index() A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in other >>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2).sort_index() A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 >>> ps.reset_option("compute.ops_on_diff_frames") """ if not isinstance(other, DataFrame): raise TypeError("`combine_first` only allows `DataFrame` for parameter `other`") if same_anchor(self, other): combined = self this = self that = other else: combined = combine_frames(self, other) this = combined["this"] that = combined["that"] intersect_column_labels = set(self._internal.column_labels).intersection( set(other._internal.column_labels) ) column_labels, data_spark_columns = [], [] for column_label in this._internal.column_labels: this_scol = this._internal.spark_column_for(column_label) if column_label in intersect_column_labels: that_scol = that._internal.spark_column_for(column_label) this_scol_name = this._internal.spark_column_name_for(column_label) combined_scol = ( F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name) ) data_spark_columns.append(combined_scol) else: data_spark_columns.append(this_scol) column_labels.append(column_label) for column_label in that._internal.column_labels: if column_label not in intersect_column_labels: that_scol = that._internal.spark_column_for(column_label) data_spark_columns.append(that_scol) column_labels.append(column_label) internal = combined._internal.copy( column_labels=column_labels, data_spark_columns=data_spark_columns, data_fields=None, # TODO: dtype? column_label_names=self._internal.column_label_names, ) return DataFrame(internal) def append( self, other: "DataFrame", ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> "DataFrame": """ Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. 
Returns ------- appended : DataFrame Examples -------- >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4 """ if isinstance(other, ps.Series): raise TypeError("DataFrames.append() does not support appending Series to DataFrames") if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if not ignore_index: index_scols = self._internal.index_spark_columns if len(index_scols) != other._internal.index_level: raise ValueError("Both DataFrames have to have the same number of index levels") if verify_integrity and len(index_scols) > 0: if ( self._internal.spark_frame.select(index_scols) .intersect( other._internal.spark_frame.select(other._internal.index_spark_columns) ) .count() ) > 0: raise ValueError("Indices have overlapping values") # Lazy import to avoid circular dependency issues from pyspark.pandas.namespace import concat return cast(DataFrame, concat([self, other], ignore_index=ignore_index)) # TODO: add 'filter_func' and 'errors' parameter def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None: """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. 
>>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0 """ if join != "left": raise NotImplementedError("Only left join is supported") if isinstance(other, ps.Series): other = other.to_frame() update_columns = list( set(self._internal.column_labels).intersection(set(other._internal.column_labels)) ) update_sdf = self.join( other[update_columns], rsuffix="_new" )._internal.resolved_copy.spark_frame data_fields = self._internal.data_fields.copy() for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for( update_sdf, other._internal.spark_column_name_for(column_labels) + "_new" ) if overwrite: update_sdf = update_sdf.withColumn( column_name, F.when(new_col.isNull(), old_col).otherwise(new_col) ) else: update_sdf = update_sdf.withColumn( column_name, F.when(old_col.isNull(), new_col).otherwise(old_col) ) data_fields[self._internal.column_labels.index(column_labels)] = None sdf = update_sdf.select( *[scol_for(update_sdf, col) for col in self._internal.spark_column_names], *HIDDEN_COLUMNS, ) internal = self._internal.with_new_sdf(sdf, data_fields=data_fields) self._update_internal_frame(internal, check_same_anchor=False) # TODO: ddof should be implemented. def cov(self, min_periods: Optional[int] = None) -> "DataFrame": """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. .. versionadded:: 3.3.0 Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. Examples -------- >>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = ps.DataFrame(np.random.randn(1000, 5), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... 
columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> sdf = ps.from_pandas(df) >>> sdf.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ min_periods = 1 if min_periods is None else min_periods # Only compute covariance for Boolean and Numeric except Decimal psdf = self[ [ col for col in self.columns if isinstance(self[col].spark.data_type, BooleanType) or ( isinstance(self[col].spark.data_type, NumericType) and not isinstance(self[col].spark.data_type, DecimalType) ) ] ] num_cols = len(psdf.columns) cov = np.zeros([num_cols, num_cols]) if num_cols == 0: return DataFrame() if len(psdf) < min_periods: cov.fill(np.nan) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) data_cols = psdf._internal.data_spark_column_names cov_scols = [] count_not_null_scols = [] # Count number of null row between two columns # Example: # a b c # 0 1 1 1 # 1 NaN 2 2 # 2 3 NaN 3 # 3 4 4 4 # # a b c # a count(a, a) count(a, b) count(a, c) # b count(b, b) count(b, c) # c count(c, c) # # count_not_null_scols = # [F.count(a, a), F.count(a, b), F.count(a, c), F.count(b, b), F.count(b, c), F.count(c, c)] for r in range(0, num_cols): for c in range(r, num_cols): count_not_null_scols.append( F.count( F.when(F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull(), 1) ) ) count_not_null = ( psdf._internal.spark_frame.replace(float("nan"), None) .select(*count_not_null_scols) .head(1)[0] ) # Calculate covariance between two columns # Example: # with min_periods = 3 # a b c # 0 1 1 1 # 1 NaN 2 2 # 2 3 NaN 3 # 3 4 4 4 # # a b c # a cov(a, a) None cov(a, c) # b cov(b, b) cov(b, c) # c cov(c, c) # # cov_scols = [F.cov(a, a), None, F.cov(a, c), F.cov(b, b), F.cov(b, c), F.cov(c, c)] step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov_scols.append( F.covar_samp( F.col(data_cols[r]).cast("double"), F.col(data_cols[c]).cast("double") ) if count_not_null[r * num_cols + c - step] >= min_periods else F.lit(None) ) pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0] # Convert from row to 2D array # Example: # pair_cov = [cov(a, a), None, cov(a, c), cov(b, b), cov(b, c), cov(c, c)] # # cov = # # a b c # a cov(a, a) None cov(a, c) # b cov(b, b) cov(b, c) # c cov(c, c) step = 0 for r in range(0, num_cols): step += r for c in range(r, num_cols): cov[r][c] = pair_cov[r * num_cols + c - step] # Copy values # Example: # cov = # a b c # a cov(a, a) None cov(a, c) # b None cov(b, b) cov(b, c) # c cov(a, c) cov(b, c) cov(c, c) cov = cov + cov.T - np.diag(np.diag(cov)) return DataFrame(cov, columns=psdf.columns, index=psdf.columns) def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ignore_index: bool = False, ) -> "DataFrame": """ Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. 
This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 3.4.0 Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 A random 50% sample of the ``DataFrame``, while ignoring the index. >>> df.sample(frac=0.5, random_state=1, ignore_index=True) # doctest: +SKIP num_legs num_wings num_specimen_seen 0 4 0 2 1 8 0 1 2 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. if n is not None: raise NotImplementedError( "Function sample currently does not support specifying " "exact number of items to return. Use frac instead." ) if frac is None: raise ValueError("frac must be specified.") sdf = self._internal.resolved_copy.spark_frame.sample( withReplacement=replace, fraction=frac, seed=random_state ) if ignore_index: return DataFrame(sdf.drop(*self._internal.index_spark_column_names)) else: return DataFrame(self._internal.with_new_sdf(sdf)) def astype(self, dtype: Union[str, Dtype, Dict[Name, Union[str, Dtype]]]) -> "DataFrame": """ Cast a pandas-on-Spark object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. 
Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ applied = [] if is_dict_like(dtype): dtype_dict = cast(Dict[Name, Union[str, Dtype]], dtype) for col_name in dtype_dict.keys(): if col_name not in self.columns: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument." ) for col_name, col in self.items(): if col_name in dtype_dict: applied.append(col.astype(dtype=dtype_dict[col_name])) else: applied.append(col) else: for col_name, col in self.items(): applied.append(col.astype(dtype=cast(Union[str, Dtype], dtype))) return DataFrame(self._internal.with_new_columns(applied)) def add_prefix(self, prefix: str) -> "DataFrame": """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(prefix, str) return self._apply_series_op( lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label])) ) def add_suffix(self, suffix: str) -> "DataFrame": """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(suffix, str) return self._apply_series_op( lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label])) ) # TODO: include, and exclude should be implemented. def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame": """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- DataFrame Summary statistics of the Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. 
DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. For object data (e.g. strings or timestamps), the result’s index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value’s frequency. Timestamps also include the ``first`` and ``last`` items. Examples -------- Describing a numeric ``Series``. >>> s = ps.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ps.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. 
>>> df.numeric1.describe(percentiles = [0.85, 0.15]) count 3.0 mean 2.0 std 1.0 min 1.0 15% 1.0 50% 2.0 85% 3.0 max 3.0 Name: numeric1, dtype: float64 """ psser_numeric: List[Series] = [] psser_string: List[Series] = [] psser_timestamp: List[Series] = [] spark_data_types: List[DataType] = [] column_labels: Optional[List[Label]] = [] column_names: List[str] = [] for label in self._internal.column_labels: psser = self._psser_for(label) spark_data_type = psser.spark.data_type if isinstance(spark_data_type, NumericType): psser_numeric.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)): psser_timestamp.append(psser) column_labels.append(label) spark_data_types.append(spark_data_type) else: psser_string.append(psser) column_names.append(self._internal.spark_column_name_for(label)) if percentiles is not None: if any((p < 0.0) or (p > 1.0) for p in percentiles): raise ValueError("Percentiles should all be in the interval [0, 1]") # appending 50% if not in percentiles already percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles else: percentiles = [0.25, 0.5, 0.75] # Identify the cases is_all_string_type = ( len(psser_numeric) == 0 and len(psser_timestamp) == 0 and len(psser_string) > 0 ) is_all_numeric_type = len(psser_numeric) > 0 and len(psser_timestamp) == 0 has_timestamp_type = len(psser_timestamp) > 0 has_numeric_type = len(psser_numeric) > 0 if is_all_string_type: # Handling string type columns # We will retrive the `count`, `unique`, `top` and `freq`. internal = self._internal.resolved_copy exprs_string = [ internal.spark_column_for(psser._column_label) for psser in psser_string ] sdf = internal.spark_frame.select(*exprs_string) # Get `count` & `unique` for each columns counts, uniques = map(lambda x: x[1:], sdf.summary("count", "count_distinct").take(2)) # Handling Empty DataFrame if len(counts) == 0 or counts[0] == "0": data = dict() for psser in psser_string: data[psser.name] = [0, 0, np.nan, np.nan] return DataFrame(data, index=["count", "unique", "top", "freq"]) # Get `top` & `freq` for each columns tops = [] freqs = [] # TODO(SPARK-37711): We should do it in single pass since invoking Spark job # for every columns is too expensive. for column in exprs_string: top, freq = sdf.groupby(column).count().sort("count", ascending=False).first() tops.append(str(top)) freqs.append(str(freq)) stats = [counts, uniques, tops, freqs] stats_names = ["count", "unique", "top", "freq"] result: DataFrame = DataFrame( data=stats, index=stats_names, columns=column_names, ) elif is_all_numeric_type: # Handling numeric columns exprs_numeric = [ psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric ] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"] # In this case, we can simply use `summary` to calculate the stats. 
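            # `summary(*stats)` returns a Spark frame whose string column "summary" holds
            # the statistic names, plus one string column per selected numeric column.
            # For the `numeric1`/`numeric2` example above it looks roughly like this
            # (illustrative):
            # +-------+--------+--------+
            # |summary|numeric1|numeric2|
            # +-------+--------+--------+
            # |  count|       3|       3|
            # |   mean|     2.0|     5.0|
            # | stddev|     1.0|     1.0|
            # |    ...|     ...|     ...|
            # +-------+--------+--------+
            # The "stddev" label is then renamed to "std" to match the pandas output.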
sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats) sdf = sdf.replace("stddev", "std", subset=["summary"]) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, "summary")], column_labels=column_labels, data_spark_columns=[ scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels ], ) result = DataFrame(internal).astype("float64") elif has_timestamp_type: internal = self._internal.resolved_copy column_names = [ internal.spark_column_name_for(column_label) for column_label in column_labels ] column_length = len(column_labels) # Apply stat functions for each column. count_exprs = map(F.count, column_names) min_exprs = map(F.min, column_names) # Here we try to flat the multiple map into single list that contains each calculated # percentile using `chain`. # e.g. flat the `[<map object at 0x7fc1907dc280>, <map object at 0x7fc1907dcc70>]` # to `[Column<'percentile_approx(A, 0.2, 10000)'>, # Column<'percentile_approx(B, 0.2, 10000)'>, # Column<'percentile_approx(A, 0.5, 10000)'>, # Column<'percentile_approx(B, 0.5, 10000)'>]` perc_exprs = chain( *[ map(F.percentile_approx, column_names, [percentile] * column_length) for percentile in percentiles ] ) max_exprs = map(F.max, column_names) mean_exprs = [] for column_name, spark_data_type in zip(column_names, spark_data_types): mean_exprs.append(F.mean(column_name).astype(spark_data_type)) exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats_names = ["count", "mean", "min", *formatted_perc, "max"] # If not all columns are timestamp type, # we also need to calculate the `std` for numeric columns if has_numeric_type: std_exprs = [] for label, spark_data_type in zip(column_labels, spark_data_types): column_name = label[0] if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): std_exprs.append(F.lit(None).alias("stddev_samp({})".format(column_name))) else: std_exprs.append(F.stddev(column_name)) exprs.extend(std_exprs) stats_names.append("std") # Select stats for all columns at once. sdf = internal.spark_frame.select(exprs) stat_values = sdf.first() num_stats = int(len(exprs) / column_length) # `column_name_stats_kv` is key-value store that has column name as key, and # the stats as values e.g. {"A": [{count_value}, {min_value}, ...], # "B": [{count_value}, {min_value} ...]} column_name_stats_kv: Dict[str, List[str]] = defaultdict(list) for i, column_name in enumerate(column_names): for first_stat_idx in range(num_stats): column_name_stats_kv[column_name].append( stat_values[(first_stat_idx * column_length) + i] ) # For timestamp type columns, we should cast the column type to string. for key, spark_data_type in zip(column_name_stats_kv, spark_data_types): if isinstance(spark_data_type, (TimestampType, TimestampNTZType)): column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]] result: DataFrame = DataFrame( # type: ignore[no-redef] data=column_name_stats_kv, index=stats_names, columns=column_names, ) else: # Empty DataFrame without column raise ValueError("Cannot describe a DataFrame without columns") return result def drop_duplicates( self, subset: Optional[Union[Name, List[Name]]] = None, keep: Union[bool, str] = "first", inplace: bool = False, ignore_index: bool = False, ) -> Optional["DataFrame"]: """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. 
Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy. ignore_index : boolean, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. >>> df = ps.DataFrame( ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b']) >>> df a b 0 1 a 1 2 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates().sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(ignore_index=True).sort_index() a b 0 1 a 1 2 a 2 2 c 3 3 d >>> df.drop_duplicates('a').sort_index() a b 0 1 a 1 2 a 4 3 d >>> df.drop_duplicates(['a', 'b']).sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep='last').sort_index() a b 0 1 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep=False).sort_index() a b 0 1 a 3 2 c 4 3 d """ inplace = validate_bool_kwarg(inplace, "inplace") sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.where(~scol_for(sdf, column)).drop(column) internal = self._internal.with_new_sdf(sdf) psdf: DataFrame = DataFrame(internal) if inplace: if ignore_index: psdf.reset_index(drop=True, inplace=inplace) self._update_internal_frame(psdf._internal) return None else: return psdf.reset_index(drop=True) if ignore_index else psdf def reindex( self, labels: Optional[Sequence[Any]] = None, index: Optional[Union["Index", Sequence[Any]]] = None, columns: Optional[Union[pd.Index, Sequence[Any]]] = None, axis: Optional[Axis] = None, copy: Optional[bool] = True, fill_value: Optional[Any] = None, ) -> "DataFrame": """ Conform DataFrame to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- labels: array-like, optional New labels / index to conform the axis specified by ‘axis’ to. index, columns: array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data axis: int or str, optional Axis to target. Can be either the axis name (‘index’, ‘columns’) or number (0, 1). copy : bool, default True Return a new object, even if the passed indexes are the same. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. Returns ------- DataFrame with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = ps.DataFrame({ ... 'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index, ... 
columns=['http_status', 'response_time']) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index).sort_index() http_status response_time Chrome 200.0 0.02 Comodo Dragon NaN NaN IE10 404.0 0.08 Iceweasel NaN NaN Safari 404.0 0.07 We can fill in the missing values by passing a value to the keyword ``fill_value``. >>> df.reindex(new_index, fill_value=0, copy=False).sort_index() http_status response_time Chrome 200 0.02 Comodo Dragon 0 0.00 IE10 404 0.08 Iceweasel 0 0.00 Safari 404 0.07 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']).sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2.sort_index() prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2).sort_index() prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN """ if axis is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.") if labels is not None: axis = validate_axis(axis) if axis == 0: index = labels elif axis == 1: columns = labels if index is not None and not is_list_like(index): raise TypeError( "Index must be called with a collection of some kind, " "%s was passed" % type(index) ) if columns is not None and not is_list_like(columns): raise TypeError( "Columns must be called with a collection of some kind, " "%s was passed" % type(columns) ) df = self if index is not None: df = df._reindex_index(index, fill_value) if columns is not None: df = df._reindex_columns(columns, fill_value) # Copy if copy and df is self: return df.copy() else: return df def _reindex_index( self, index: Optional[Union["Index", Sequence[Any]]], fill_value: Optional[Any] ) -> "DataFrame": # When axis is index, we can mimic pandas' by a right outer join. nlevels = self._internal.index_level assert nlevels <= 1 or ( isinstance(index, ps.MultiIndex) and nlevels == index.nlevels ), "MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex." 
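        # Build a Spark frame that contains only the requested index labels and
        # right-join the original frame onto it on the index columns: labels that are
        # missing from the original frame come back as all-null rows. When a
        # `fill_value` is given, the nulls introduced by the join are replaced with it
        # below; otherwise the data fields are simply marked nullable.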
index_columns = self._internal.index_spark_column_names frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME) if isinstance(index, ps.Index): if nlevels != index.nlevels: return DataFrame(index._internal.with_new_columns([])).reindex( columns=self.columns, fill_value=fill_value ) index_names = index._internal.index_names scols = index._internal.index_spark_columns labels = index._internal.spark_frame.select( [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)] ) else: index = ps.Index(list(index)) labels = index._internal.spark_frame.select(index.spark.column.alias(index_columns[0])) index_names = self._internal.index_names if fill_value is not None: frame_index_columns = [ verify_temp_column_name(frame, "__frame_index_column_{}__".format(i)) for i in range(nlevels) ] index_scols = [ scol_for(frame, index_col).alias(frame_index_col) for index_col, frame_index_col in zip(index_columns, frame_index_columns) ] scols = self._internal.resolved_copy.data_spark_columns frame = frame.select(index_scols + scols) temp_fill_value = verify_temp_column_name(frame, "__fill_value__") labels = labels.withColumn(temp_fill_value, SF.lit(fill_value)) frame_index_scols = [scol_for(frame, col) for col in frame_index_columns] labels_index_scols = [scol_for(labels, col) for col in index_columns] joined_df = frame.join( labels, on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)], how="right", ) joined_df = joined_df.select( *labels_index_scols, *[ F.when( reduce( lambda c1, c2: c1 & c2, [ fcol.isNull() & lcol.isNotNull() for fcol, lcol in zip(frame_index_scols, labels_index_scols) ], ), scol_for(joined_df, temp_fill_value), ) .otherwise(scol_for(joined_df, col)) .alias(col) for col in self._internal.data_spark_column_names ], ) data_fields = None else: joined_df = frame.join(labels, on=index_columns, how="right") data_fields = [field.copy(nullable=True) for field in self._internal.data_fields] sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME) internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=index_names, index_fields=[ field.copy(name=name) for field, name in zip( index._internal.index_fields, self._internal.index_spark_column_names ) ], data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], data_fields=data_fields, ) return DataFrame(internal) def _reindex_columns( self, columns: Optional[Union[pd.Index, Sequence[Any]]], fill_value: Optional[Any] ) -> "DataFrame": level = self._internal.column_labels_level if level > 1: label_columns = list(columns) for col in label_columns: if not isinstance(col, tuple): raise TypeError("Expected tuple, got {}".format(type(col).__name__)) else: label_columns = [(col,) for col in columns] for col in label_columns: if len(col) != level: raise ValueError( "shape (1,{}) doesn't match the shape (1,{})".format(len(col), level) ) fill_value = np.nan if fill_value is None else fill_value scols_or_pssers: List[Union[Series, Column]] = [] labels = [] for label in label_columns: if label in self._internal.column_labels: scols_or_pssers.append(self._psser_for(label)) else: scols_or_pssers.append(SF.lit(fill_value).alias(name_like_string(label))) labels.append(label) if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] internal = self._internal.with_new_columns( scols_or_pssers, 
column_labels=labels, column_label_names=column_label_names ) else: internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels) return DataFrame(internal) def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame": """ Return a DataFrame with matching indices as other object. Conform the object to the same index on all axes. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : DataFrame Its row and column indices are used to define the new indices of this object. copy : bool, default True Return a new object, even if the passed indexes are the same. Returns ------- DataFrame DataFrame with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = ps.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN None 2014-02-15 35.1 NaN medium """ if isinstance(other, DataFrame): return self.reindex(index=other.index, columns=other.columns, copy=copy) else: raise TypeError("other must be a pandas-on-Spark DataFrame") def melt( self, id_vars: Optional[Union[Name, List[Name]]] = None, value_vars: Optional[Union[Name, List[Name]]] = None, var_name: Optional[Union[str, List[str]]] = None, value_name: str = "value", ) -> "DataFrame": """ Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default 'variable' Name to use for the 'variable' column. If None it uses `frame.columns.name` or ‘variable’. value_name : scalar, default 'value' Name to use for the 'value' column. Returns ------- DataFrame Unpivoted DataFrame. Examples -------- >>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}, ... 
columns=['A', 'B', 'C']) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> ps.melt(df) variable value 0 A a 1 B 1 2 C 2 3 A b 4 B 3 5 C 4 6 A c 7 B 5 8 C 6 >>> df.melt(id_vars='A') A variable value 0 a B 1 1 a C 2 2 b B 3 3 b C 4 4 c B 5 5 c C 6 >>> df.melt(value_vars='A') variable value 0 A a 1 A b 2 A c >>> ps.melt(df, id_vars=['A', 'B']) A B variable value 0 a 1 C 2 1 b 3 C 4 2 c 5 C 6 >>> df.melt(id_vars=['A'], value_vars=['C']) A variable value 0 a C 2 1 b C 4 2 c C 6 The names of 'variable' and 'value' columns can be customized: >>> ps.melt(df, id_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 """ column_labels = self._internal.column_labels if id_vars is None: id_vars = [] else: if isinstance(id_vars, tuple): if self._internal.column_labels_level == 1: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] else: raise ValueError( "id_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(id_vars): id_vars = [(id_vars,)] else: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] non_existence_col = [idv for idv in id_vars if idv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels: np.ndarray[Any, np.dtype[Any]] = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'id_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if value_vars is None: value_vars = [] else: if isinstance(value_vars, tuple): if self._internal.column_labels_level == 1: value_vars = [ valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars ] else: raise ValueError( "value_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(value_vars): value_vars = [(value_vars,)] else: value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars] non_existence_col = [valv for valv in value_vars if valv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'value_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if len(value_vars) == 0: value_vars = column_labels column_labels = [label for label in column_labels if label not in id_vars] sdf = self._internal.spark_frame if var_name is None: if ( self._internal.column_labels_level == 1 and self._internal.column_label_names[0] is None ): var_name = ["variable"] else: var_name = [ name_like_string(name) if name is not None else "variable_{}".format(i) for i, name in enumerate(self._internal.column_label_names) ] elif isinstance(var_name, str): var_name = [var_name] pairs = F.explode( F.array( *[ F.struct( *[SF.lit(c).alias(name) for c, name in zip(label, var_name)], *[self._internal.spark_column_for(label).alias(value_name)], ) for label in column_labels if label in value_vars ] ) ) columns = ( [ self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars ] + [F.col("pairs.`%s`" % name) for name in var_name] + [F.col("pairs.`%s`" % value_name)] ) exploded_df = sdf.withColumn("pairs", 
pairs).select(columns) return DataFrame( InternalFrame( spark_frame=exploded_df, index_spark_columns=None, column_labels=( [label if len(label) == 1 else (name_like_string(label),) for label in id_vars] + [(name,) for name in var_name] + [(value_name,)] ), ) ) def stack(self) -> DataFrameOrSeries: """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack().sort_index() cat height 1 weight 0 dog height 3 weight 2 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack().sort_index() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. 
Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN """ from pyspark.pandas.series import first_series if len(self._internal.column_labels) == 0: return DataFrame( self._internal.copy( column_label_names=self._internal.column_label_names[:-1] ).with_filter(SF.lit(False)) ) column_labels: Dict[Label, Dict[Any, Column]] = defaultdict(dict) index_values = set() should_returns_series = False for label in self._internal.column_labels: new_label = label[:-1] if len(new_label) == 0: new_label = None should_returns_series = True value = label[-1] scol = self._internal.spark_column_for(label) column_labels[new_label][value] = scol index_values.add(value) column_labels = dict(sorted(column_labels.items(), key=lambda x: x[0])) index_name = self._internal.column_label_names[-1] column_label_names = self._internal.column_label_names[:-1] if len(column_label_names) == 0: column_label_names = [None] index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level) data_columns = [name_like_string(label) for label in column_labels] structs = [ F.struct( *[SF.lit(value).alias(index_column)], *[ ( column_labels[label][value] if value in column_labels[label] else SF.lit(None) ).alias(name) for label, name in zip(column_labels, data_columns) ], ).alias(value) for value in index_values ] pairs = F.explode(F.array(*structs)) sdf = self._internal.spark_frame.withColumn("pairs", pairs) sdf = sdf.select( self._internal.index_spark_columns + [sdf["pairs"][index_column].alias(index_column)] + [sdf["pairs"][name].alias(name) for name in data_columns] ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in (self._internal.index_spark_column_names + [index_column]) ], index_names=self._internal.index_names + [index_name], index_fields=self._internal.index_fields + [None], column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) psdf: DataFrame = DataFrame(internal) if should_returns_series: return first_series(psdf) else: return psdf def unstack(self) -> DataFrameOrSeries: """ Pivot the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series. .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and it could cause a serious performance degradation since Spark partitions it row based. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack). Examples -------- >>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"}, ... "B": {"0": "1", "1": "3", "2": "5"}, ... "C": {"0": "2", "1": "4", "2": "6"}}, ... columns=["A", "B", "C"]) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() A 0 a 1 b 2 c B 0 1 1 3 2 5 C 0 2 1 4 2 6 dtype: object >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) >>> df.unstack().sort_index() X A 0 a 1 b 2 c B 0 1 1 3 2 5 Y C 0 2 1 4 2 6 dtype: object For MultiIndex case: >>> df = ps.DataFrame({"A": ["a", "b", "c"], ... "B": [1, 3, 5], ... "C": [2, 4, 6]}, ... 
columns=["A", "B", "C"]) >>> df = df.set_index('A', append=True) >>> df # doctest: +NORMALIZE_WHITESPACE B C A 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE B C A a b c a b c 0 1.0 NaN NaN 2.0 NaN NaN 1 NaN 3.0 NaN NaN 4.0 NaN 2 NaN NaN 5.0 NaN NaN 6.0 """ from pyspark.pandas.series import first_series if self._internal.index_level > 1: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: self._internal.index_level - 1] columns = df.columns[self._internal.index_level - 1] df = df.pivot_table( index=index, columns=columns, values=self._internal.column_labels, aggfunc="first" ) internal = df._internal.copy( index_names=self._internal.index_names[:-1], index_fields=df._internal.index_fields[: self._internal.index_level - 1], column_label_names=( df._internal.column_label_names[:-1] + [ None if self._internal.index_names[-1] is None else df._internal.column_label_names[-1] ] ), ) return DataFrame(internal) # TODO: Codes here are similar with melt. Should we deduplicate? column_labels = self._internal.column_labels ser_name = SPARK_DEFAULT_SERIES_NAME sdf = self._internal.spark_frame new_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, [])) pairs = F.explode( F.array( *[ F.struct( *[SF.lit(c).alias(name) for c, name in zip(idx, new_index_columns)], *[self._internal.spark_column_for(idx).alias(ser_name)], ) for idx in column_labels ] ) ) columns = [ F.col("pairs.%s" % name) for name in new_index_columns[: self._internal.column_labels_level] ] + [F.col("pairs.%s" % ser_name)] new_index_len = len(new_index_columns) existing_index_columns = [] for i, (index_name, index_field) in enumerate( zip(self._internal.index_names, self._internal.index_fields) ): name = SPARK_INDEX_NAME_FORMAT(i + new_index_len) new_index_map.append((name, index_name, index_field.copy(name=name))) existing_index_columns.append(self._internal.index_spark_columns[i].alias(name)) exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns) index_spark_column_names, index_names, index_fields = zip(*new_index_map) return first_series( DataFrame( InternalFrame( exploded_df, index_spark_columns=[ scol_for(exploded_df, col) for col in index_spark_column_names ], index_names=list(index_names), index_fields=list(index_fields), column_labels=[None], ) ) ) # TODO: axis, level and **kwargs should be implemented. def all( self, axis: Axis = 0, bool_only: Optional[bool] = None, skipna: bool = True ) -> "Series": """ Return whether all elements are True. Returns True unless there is at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. skipna : boolean, default True Exclude NA values, such as None or numpy.NaN. If an entire row/column is NA values and `skipna` is True, then the result will be True, as for an empty row/column. 
If `skipna` is False, numpy.NaNs are treated as True because these are not equal to zero, Nones are treated as False. Returns ------- Series Examples -------- Create a dataframe from a dictionary. >>> df = ps.DataFrame({ ... 'col1': [True, True, True], ... 'col2': [True, False, False], ... 'col3': [0, 0, 0], ... 'col4': [1, 2, 3], ... 'col5': [True, True, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return True. >>> df.all() col1 True col2 False col3 False col4 True col5 True col6 False dtype: bool Include NA values when set `skipna=False`. >>> df[['col5', 'col6']].all(skipna=False) col5 False col6 False dtype: bool Include only boolean columns when set `bool_only=True`. >>> df.all(bool_only=True) col1 True col2 False dtype: bool """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') column_labels = self._internal.column_labels if bool_only: column_labels = self._bool_column_labels(column_labels) if len(column_labels) == 0: return ps.Series([], dtype=bool) applied = [] for label in column_labels: scol = self._internal.spark_column_for(label) if isinstance(self._internal.spark_type_for(label), NumericType) or skipna: # np.nan takes no effect to the result; None takes no effect if `skipna` all_col = F.min(F.coalesce(scol.cast("boolean"), SF.lit(True))) else: # Take None as False when not `skipna` all_col = F.min( F.when(scol.isNull(), SF.lit(False)).otherwise(scol.cast("boolean")) ) applied.append(F.when(all_col.isNull(), True).otherwise(all_col)) return self._result_aggregated(column_labels, applied) # TODO: axis, skipna, level and **kwargs should be implemented. def any(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> "Series": """ Return whether any element is True. Returns False unless there is at least one element within a series that is True or equivalent (e.g. non-zero or non-empty). Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. Returns ------- Series Examples -------- Create a dataframe from a dictionary. >>> df = ps.DataFrame({ ... 'col1': [False, False, False], ... 'col2': [True, False, False], ... 'col3': [0, 0, 1], ... 'col4': [0, 1, 2], ... 'col5': [False, False, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return True. >>> df.any() col1 False col2 True col3 True col4 True col5 False col6 True dtype: bool Include only boolean columns when set `bool_only=True`. >>> df.any(bool_only=True) col1 False col2 True dtype: bool Returns empty Series when the DataFrame is empty. 
>>> df[[]].any() Series([], dtype: bool) """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') column_labels = self._internal.column_labels if bool_only: column_labels = self._bool_column_labels(column_labels) if len(column_labels) == 0: return ps.Series([], dtype=bool) applied = [] for label in column_labels: scol = self._internal.spark_column_for(label) any_col = F.max(F.coalesce(scol.cast("boolean"), SF.lit(False))) applied.append(F.when(any_col.isNull(), False).otherwise(any_col)) return self._result_aggregated(column_labels, applied) def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]: """ Filter column labels of boolean columns (without None). """ bool_column_labels = [] for label in column_labels: psser = self._psser_for(label) if is_bool_dtype(psser): # Rely on dtype rather than spark type because # columns that consist of bools and Nones should be excluded # if bool_only is True bool_column_labels.append(label) return bool_column_labels def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> "Series": """ Given aggregated Spark columns and respective column labels from the original pandas-on-Spark DataFrame, construct the result Series. """ from pyspark.pandas.series import first_series cols = [] result_scol_name = "value" for label, applied_col in zip(column_labels, scols): cols.append( F.struct( *[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)], *[applied_col.alias(result_scol_name)], ) ) # Statements under this comment implement spark frame transformations as below: # From: # +-------------------------------------------------------------------------------------+ # |arrays | # +-------------------------------------------------------------------------------------+ # |[{col1, true}, {col2, true}, {col3, false}, {col4, true}]| # +-------------------------------------------------------------------------------------+ # To: # +-------------+ # |col | # +-------------+ # |{col1, true} | # |{col2, true} | # |{col3, false}| # |{col4, true} | # +-------------+ # To: # +-----------------+-----+ # |__index_level_0__|value| # +-----------------+-----+ # |col1 |true | # |col2 |true | # |col3 |false| # |col4 |true | # +-----------------+-----+ sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level) ], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, result_scol_name)], ) # (cont.) The result Series should look as below: # col1 False # col2 True # col3 True # col4 True # dtype: bool return first_series(DataFrame(internal)) # TODO: add axis, pct, na_option parameter def rank( self, method: str = "average", ascending: bool = True, numeric_only: Optional[bool] = None ) -> "DataFrame": """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. 
Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) numeric_only : bool, optional For DataFrame objects, rank only numeric columns if set to True. Returns ------- ranks : same type as caller Examples -------- >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B']) >>> df A B 0 1 4 1 2 3 2 2 2 3 3 1 >>> df.rank().sort_index() A B 0 1.0 4.0 1 2.5 3.0 2 2.5 2.0 3 4.0 1.0 If method is set to 'min', it use lowest rank in group. >>> df.rank(method='min').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 4.0 1.0 If method is set to 'max', it use highest rank in group. >>> df.rank(method='max').sort_index() A B 0 1.0 4.0 1 3.0 3.0 2 3.0 2.0 3 4.0 1.0 If method is set to 'dense', it leaves no gaps in group. >>> df.rank(method='dense').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 3.0 1.0 If numeric_only is set to 'True', rank only numeric columns. >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B']) >>> df A B 0 1 a 1 2 b 2 2 d 3 3 c >>> df.rank(numeric_only=True) A 0 1.0 1 2.5 2 2.5 3 4.0 """ if numeric_only: numeric_col_names = [] for label in self._internal.column_labels: psser = self._psser_for(label) if isinstance(psser.spark.data_type, (NumericType, BooleanType)): numeric_col_names.append(psser.name) psdf = self[numeric_col_names] if numeric_only else self return psdf._apply_series_op( lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True ) def filter( self, items: Optional[Sequence[Any]] = None, like: Optional[str] = None, regex: Optional[str] = None, axis: Optional[Axis] = None, ) -> "DataFrame": """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... 
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 For a Series, >>> # select rows by name >>> df.one.filter(items=['rabbit']) rabbit 4 Name: one, dtype: int64 >>> # select rows by regular expression >>> df.one.filter(regex='e$') mouse 1 Name: one, dtype: int64 >>> # select rows containing 'bbi' >>> df.one.filter(like='bbi') rabbit 4 Name: one, dtype: int64 """ if sum(x is not None for x in (items, like, regex)) > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) axis = validate_axis(axis, none_axis=1) index_scols = self._internal.index_spark_columns if items is not None: if is_list_like(items): items = list(items) else: raise ValueError("items should be a list-like object.") if axis == 0: if len(index_scols) == 1: if len(items) <= ps.get_option("compute.isin_limit"): col = index_scols[0].isin([SF.lit(item) for item in items]) return DataFrame(self._internal.with_filter(col)) else: item_sdf_col = verify_temp_column_name( self._internal.spark_frame, "__item__" ) item_sdf = default_session().createDataFrame( pd.DataFrame({item_sdf_col: items}) ) joined_sdf = self._internal.spark_frame.join( other=F.broadcast(item_sdf), on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)), how="semi", ) return DataFrame(self._internal.with_new_sdf(joined_sdf)) else: # for multi-index col = None for item in items: if not isinstance(item, tuple): raise TypeError("Unsupported type {}".format(type(item).__name__)) if not item: raise ValueError("The item should not be empty.") midx_col = None for i, element in enumerate(item): if midx_col is None: midx_col = index_scols[i] == SF.lit(element) else: midx_col = midx_col & (index_scols[i] == SF.lit(element)) if col is None: col = midx_col else: col = col | midx_col return DataFrame(self._internal.with_filter(col)) else: return self[items] elif like is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.contains(like) else: col = col | index_scol.contains(like) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels output_labels = [label for label in column_labels if any(like in i for i in label)] return self[output_labels] elif regex is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.rlike(regex) else: col = col | index_scol.rlike(regex) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels matcher = re.compile(regex) output_labels = [ label for label in column_labels if any(matcher.search(i) is not None for i in label) ] return self[output_labels] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def rename( self, mapper: Optional[Union[Dict, Callable[[Any], Any]]] = None, index: Optional[Union[Dict, Callable[[Any], Any]]] = None, columns: Optional[Union[Dict, Callable[[Any], Any]]] = None, axis: Axis = "index", inplace: bool = False, level: Optional[int] = None, errors: str = "ignore", ) -> Optional["DataFrame"]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don’t throw an error. 
Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis’ values. Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index` and `columns`. index : dict-like or function Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper"). columns : dict-like or function Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper"). axis : int or str, default 'index' Axis to target with mapper. Can be either the axis name ('index', 'columns') or number (0, 1). inplace : bool, default False Whether to return a new DataFrame. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame with the renamed axis labels. Raises ------ `KeyError` If any of the labels is not found in the selected axis and "errors='raise'". Examples -------- >>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE a c 0 1 4 1 2 5 2 3 6 >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> psdf1.rename(columns={"A": "a", "C": "c"}, errors="raise") Traceback (most recent call last): ... KeyError: 'Index include value which is not in the `mapper`' >>> def str_lower(s) -> str: ... return str.lower(s) >>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE a b 0 1 4 1 2 5 2 3 6 >>> def mul10(x) -> int: ... return x * 10 >>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]) >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE x y A B C D 0 1 2 3 4 1 5 6 7 8 >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab')) >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE a b x a 1 2 b 3 4 y c 5 6 d 7 8 """ def gen_mapper_fn( mapper: Union[Dict, Callable[[Any], Any]], skip_return_type: bool = False ) -> Tuple[Callable[[Any], Any], Dtype, DataType]: if isinstance(mapper, dict): mapper_dict = mapper type_set = set(map(lambda x: type(x), mapper_dict.values())) if len(type_set) > 1: raise ValueError("Mapper dict should have the same value type.") dtype, spark_return_type = pandas_on_spark_type(list(type_set)[0]) def mapper_fn(x: Any) -> Any: if x in mapper_dict: return mapper_dict[x] else: if errors == "raise": raise KeyError("Index include value which is not in the `mapper`") return x return mapper_fn, dtype, spark_return_type elif callable(mapper): mapper_callable = cast(Callable, mapper) def mapper_fn(x: Any) -> Any: return mapper_callable(x) if skip_return_type: return mapper_fn, None, None else: return_type = cast(ScalarType, infer_return_type(mapper)) dtype = return_type.dtype spark_return_type = return_type.spark_type return mapper_fn, dtype, spark_return_type else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." 
                )

        index_mapper_fn = None
        index_mapper_ret_stype = None
        columns_mapper_fn = None

        inplace = validate_bool_kwarg(inplace, "inplace")
        if mapper:
            axis = validate_axis(axis)
            if axis == 0:
                index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(
                    mapper
                )
            elif axis == 1:
                columns_mapper_fn, _, _ = gen_mapper_fn(mapper)
        else:
            if index:
                index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(
                    index
                )
            if columns:
                columns_mapper_fn, _, _ = gen_mapper_fn(columns, skip_return_type=True)

            if not index and not columns:
                raise ValueError("Either `index` or `columns` should be provided.")

        psdf = self.copy()

        if index_mapper_fn:
            # Rename index labels: if `level` is None, rename all index columns, otherwise only
            # rename the index column of the corresponding level.
            # This is implemented by transforming the underlying Spark DataFrame.
            # Example:
            # suppose the psdf index columns in the underlying Spark DataFrame are
            # "index_0" and "index_1".
            # Renaming only the level 0 index labels will do:
            #   ``psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
            # Renaming all index labels (`level` is None) will do:
            #   ```
            #   psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
            #            .withColumn("index_1", mapper_fn_udf(col("index_1"))
            #   ```
            index_columns = psdf._internal.index_spark_column_names
            num_indices = len(index_columns)
            if level:
                if level < 0 or level >= num_indices:
                    raise ValueError("level should be an integer between [0, %s)" % num_indices)

            @pandas_udf(returnType=index_mapper_ret_stype)  # type: ignore[call-overload]
            def index_mapper_udf(s: pd.Series) -> pd.Series:
                return s.map(index_mapper_fn)

            index_spark_columns = psdf._internal.index_spark_columns.copy()
            index_fields = psdf._internal.index_fields.copy()
            if level is None:
                for i in range(num_indices):
                    index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias(
                        index_columns[i]
                    )
                    index_fields[i] = index_fields[i].copy(
                        dtype=index_mapper_ret_dtype,
                        spark_type=index_mapper_ret_stype,
                        nullable=True,
                    )
            else:
                index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(
                    index_columns[level]
                )
                index_fields[level] = index_fields[level].copy(
                    dtype=index_mapper_ret_dtype,
                    spark_type=index_mapper_ret_stype,
                    nullable=True,
                )
            psdf = DataFrame(
                psdf._internal.copy(
                    index_spark_columns=index_spark_columns, index_fields=index_fields
                )
            )
        if columns_mapper_fn:
            # Rename column names.
            # This modifies `_internal._column_labels` and renames the columns of the underlying
            # Spark DataFrame to match the new `_internal._column_labels`.
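            # A rough sketch of the label transformation (for illustration only), mirroring the
            # `rename(columns=str_lower, level=0)` doctest above: with MultiIndex columns
            #   [("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")]
            # only the level-0 element of each label tuple is mapped, giving
            #   [("x", "A"), ("x", "B"), ("y", "C"), ("y", "D")]
            # whereas `level=None` would map every element of every label tuple.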
if level: if level < 0 or level >= psdf._internal.column_labels_level: raise ValueError("level should be an integer between [0, column_labels_level)") def gen_new_column_labels_entry(column_labels_entry: Label) -> Label: if level is None: # rename all level columns return tuple(map(columns_mapper_fn, column_labels_entry)) else: # only rename specified level column entry_list = list(column_labels_entry) entry_list[level] = columns_mapper_fn(entry_list[level]) return tuple(entry_list) new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels)) new_data_pssers = [ psdf._psser_for(old_label).rename(new_label) for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels) ] psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers)) if inplace: self._update_internal_frame(psdf._internal) return None else: return psdf def rename_axis( self, mapper: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, index: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, columns: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None, axis: Optional[Axis] = 0, inplace: Optional[bool] = False, ) -> Optional["DataFrame"]: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional A scalar, list-like, dict-like or functions transformations to apply to the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. inplace : bool, default False Modifies the object directly, instead of creating a new DataFrame. Returns ------- DataFrame, or None if `inplace` is True. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. The second calling convention will modify the names of the corresponding index specified by axis. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- >>> df = ps.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=["dog", "cat", "monkey"], ... columns=["num_legs", "num_arms"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE limbs num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 **MultiIndex** >>> index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df = ps.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=index, ... 
columns=["num_legs", "num_arms"]) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms class name mammal cat 4 0 dog 4 0 monkey 2 2 >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms TYPE NAME mammal cat 4 0 dog 4 0 monkey 2 2 """ def gen_names( v: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]], curnames: List[Name], ) -> List[Label]: newnames: List[Name] if is_scalar(v): newnames = [cast(Name, v)] elif is_list_like(v) and not is_dict_like(v): newnames = list(cast(Sequence[Name], v)) elif is_dict_like(v): v_dict = cast(Dict[Name, Name], v) newnames = [v_dict[name] if name in v_dict else name for name in curnames] elif callable(v): v_callable = cast(Callable[[Name], Name], v) newnames = [v_callable(name) for name in curnames] else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) if len(newnames) != len(curnames): raise ValueError( "Length of new names must be {}, got {}".format(len(curnames), len(newnames)) ) return [name if is_name_like_tuple(name) else (name,) for name in newnames] if mapper is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.") if mapper is not None: axis = validate_axis(axis) if axis == 0: index = mapper elif axis == 1: columns = mapper column_label_names = ( gen_names(columns, self.columns.names) if columns is not None else self._internal.column_label_names ) index_names = ( gen_names(index, self.index.names) if index is not None else self._internal.index_names ) internal = self._internal.copy( index_names=index_names, column_label_names=column_label_names ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def keys(self) -> pd.Index: """ Return alias for columns. Returns ------- Index Columns of the DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.keys() Index(['max_speed', 'shield'], dtype='object') """ return self.columns def pct_change(self, periods: int = 1) -> "DataFrame": """ Percentage change between the current and a prior element. .. note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- DataFrame Examples -------- Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = ps.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... 
index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 You can set periods to shift for forming percent change >>> df.pct_change(2) FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 NaN NaN NaN 1980-03-01 0.067912 0.073814 0.06883 """ window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) def op(psser: ps.Series) -> Column: prev_row = F.lag(psser.spark.column, periods).over(window) return ((psser.spark.column - prev_row) / prev_row).alias( psser._internal.data_spark_column_names[0] ) return self._apply_series_op(op, should_resolve=True) # TODO: axis = 1 def idxmax(self, axis: Axis = 0) -> "Series": """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with maximum value using `to_pandas()` because we suppose the number of rows with max values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmax Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmax() a 2 b 0 c 2 dtype: int64 For Multi-column Index >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmax() a x 2 b y 0 c z 2 dtype: int64 """ max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns) sdf_max = self._internal.spark_frame.select(*max_cols).head() # `sdf_max` looks like below # +------+------+------+ # |(a, x)|(b, y)|(c, z)| # +------+------+------+ # | 3| 4.0| 400| # +------+------+------+ conds = ( scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max) ) cond = reduce(lambda x, y: x | y, conds) psdf: DataFrame = DataFrame(self._internal.with_filter(cond)) return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax())) # TODO: axis = 1 def idxmin(self, axis: Axis = 0) -> "Series": """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with minimum value using `to_pandas()` because we suppose the number of rows with min values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmin Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> psdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmin() a 0 b 3 c 1 dtype: int64 For Multi-column Index >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 
'c': [300, 200, 400, 200]}) >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> psdf.idxmin() a x 0 b y 3 c z 1 dtype: int64 """ min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns) sdf_min = self._internal.spark_frame.select(*min_cols).head() conds = ( scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min) ) cond = reduce(lambda x, y: x | y, conds) psdf: DataFrame = DataFrame(self._internal.with_filter(cond)) return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin())) def info( self, verbose: Optional[bool] = None, buf: Optional[IO[str]] = None, max_cols: Optional[int] = None, null_counts: Optional[bool] = None, ) -> None: """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. null_counts : bool, optional Whether to show the non-null counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = ps.DataFrame( ... {"int_col": int_values, "text_col": text_values, "float_col": float_values}, ... columns=['int_col', 'text_col', 'float_col']) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) # doctest: +SKIP <class 'pyspark.pandas.frame.DataFrame'> Index: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) # doctest: +SKIP <class 'pyspark.pandas.frame.DataFrame'> Index: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open('%s/info.txt' % path, "w", ... encoding="utf-8") as f: ... _ = f.write(s) >>> with open('%s/info.txt' % path) as f: ... f.readlines() # doctest: +SKIP ["<class 'pyspark.pandas.frame.DataFrame'>\\n", 'Index: 5 entries, 0 to 4\\n', 'Data columns (total 3 columns):\\n', ' # Column Non-Null Count Dtype \\n', '--- ------ -------------- ----- \\n', ' 0 int_col 5 non-null int64 \\n', ' 1 text_col 5 non-null object \\n', ' 2 float_col 5 non-null float64\\n', 'dtypes: float64(1), int64(1), object(1)'] """ # To avoid pandas' existing config affects pandas-on-Spark. # TODO: should we have corresponding pandas-on-Spark configs? 
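        # Rough outline of the workaround below (descriptive only): pandas' own
        # `DataFrame.info` is reused as-is by
        #   1. aliasing `self` as `_data`, which pandas' implementation reads internally, and
        #   2. temporarily swapping `self.count` for a variant that converts the distributed
        #      count result with `_to_pandas()`, so the non-null counts can be rendered.
        # Both patches are undone in the `finally` block.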
with pd.option_context( "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize ): try: # hack to use pandas' info as is. object.__setattr__(self, "_data", self) count_func = self.count self.count = ( # type: ignore[assignment] lambda: count_func()._to_pandas() # type: ignore[assignment, misc, union-attr] ) return pd.DataFrame.info( self, # type: ignore[arg-type] verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts, ) finally: del self._data self.count = count_func # type: ignore[assignment] # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas' def quantile( self, q: Union[float, Iterable[float]] = 0.5, axis: Axis = 0, numeric_only: bool = True, accuracy: int = 10000, ) -> DataFrameOrSeries: """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. axis : int or str, default 0 or 'index' Can only be set to 0 at the moment. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. Can only be set to True at the moment. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]}) >>> psdf a b 0 1 6 1 2 7 2 3 8 3 4 9 4 5 0 >>> psdf.quantile(.5) a 3.0 b 7.0 Name: 0.5, dtype: float64 >>> psdf.quantile([.25, .5, .75]) a b 0.25 2.0 6.0 0.50 3.0 7.0 0.75 4.0 8.0 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not isinstance(accuracy, int): raise TypeError( "accuracy must be an integer; however, got [%s]" % type(accuracy).__name__ ) qq: Union[float, List[float]] = list(q) if isinstance(q, Iterable) else q for v in qq if isinstance(qq, list) else [qq]: if not isinstance(v, float): raise TypeError( "q must be a float or an array of floats; however, [%s] found." % type(v) ) if v < 0.0 or v > 1.0: raise ValueError("percentiles should all be in the interval [0, 1].") def quantile(psser: "Series") -> Column: spark_type = psser.spark.data_type spark_column = psser.spark.column if isinstance(spark_type, (BooleanType, NumericType)): return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy) else: raise TypeError( "Could not convert {} ({}) to numeric".format( spark_type_to_pandas_dtype(spark_type), spark_type.simpleString() ) ) if isinstance(qq, list): # First calculate the percentiles from all columns and map it to each `quantiles` # by creating each entry as a struct. 
So, it becomes an array of structs as below: # # +-----------------------------------------+ # | arrays| # +-----------------------------------------+ # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]| # +-----------------------------------------+ percentile_cols: List[Column] = [] percentile_col_names: List[str] = [] column_labels: List[Label] = [] for label, column in zip( self._internal.column_labels, self._internal.data_spark_column_names ): psser = self._psser_for(label) is_numeric_or_boolean = isinstance( psser.spark.data_type, (NumericType, BooleanType) ) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: percentile_col = quantile(psser) percentile_cols.append(percentile_col.alias(column)) percentile_col_names.append(column) column_labels.append(label) if len(percentile_cols) == 0: return DataFrame(index=qq) sdf = self._internal.spark_frame.select(percentile_cols) # Here, after select percentile cols, a spark_frame looks like below: # +---------+---------+ # | a| b| # +---------+---------+ # |[2, 3, 4]|[6, 7, 8]| # +---------+---------+ cols_dict: Dict[str, List[Column]] = {} for column in percentile_col_names: cols_dict[column] = list() for i in range(len(qq)): cols_dict[column].append(scol_for(sdf, column)[i].alias(column)) internal_index_column = SPARK_DEFAULT_INDEX_NAME cols = [] for i, col in enumerate(zip(*cols_dict.values())): cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col)) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. # +-----------------+---+---+ # |__index_level_0__| a| b| # +-----------------+---+---+ # | 0.25| 2| 6| # | 0.5| 3| 7| # | 0.75| 4| 8| # +-----------------+---+---+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, internal_index_column)], column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names], ) return DataFrame(internal) else: return self._reduce_for_stat_function( quantile, name="quantile", numeric_only=numeric_only ).rename(qq) def query(self, expr: str, inplace: bool = False) -> Optional["DataFrame"]: """ Query the columns of a DataFrame with a boolean expression. .. note:: Internal columns that starting with a '__' prefix are able to access, however, they are not supposed to be accessed. .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the pandas specific syntax such as `@` is not supported. If you want the pandas syntax, you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should be aware that `query_func` will be executed at different nodes in a distributed manner. So, for example, to use `@` syntax, make sure the variable is serialized by, for example, putting it within the closure as below. >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)}) >>> def query_func(pdf): ... num = 1995 ... return pdf.query('A > @num') >>> df.pandas_on_spark.apply_batch(query_func) A B 1996 1996 1996 1997 1997 1997 1998 1998 1998 1999 1999 1999 Parameters ---------- expr : str The query string to evaluate. You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. 
Returns ------- DataFrame DataFrame resulting from the provided query expression. Examples -------- >>> df = ps.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ if isinstance(self.columns, pd.MultiIndex): raise TypeError("Doesn't support for MultiIndex columns") if not isinstance(expr, str): raise TypeError( "expr must be a string to be evaluated, {} given".format(type(expr).__name__) ) inplace = validate_bool_kwarg(inplace, "inplace") data_columns = [label[0] for label in self._internal.column_labels] sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [ scol.alias(col) for scol, col in zip(self._internal.data_spark_columns, data_columns) ] ).filter(expr) internal = self._internal.with_new_sdf(sdf, data_columns=data_columns) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def take(self, indices: List[int], axis: Axis = 0, **kwargs: Any) -> "DataFrame": """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]).sort_index() name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. 
>>> df.take([-1, -2]).sort_index() name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ axis = validate_axis(axis) if not is_list_like(indices) or isinstance(indices, (dict, set)): raise TypeError("`indices` must be a list-like except dict or set") if axis == 0: return cast(DataFrame, self.iloc[indices, :]) else: return cast(DataFrame, self.iloc[:, indices]) def eval(self, expr: str, inplace: bool = False) -> Optional[DataFrameOrSeries]: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. Returns ------- The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Examples -------- >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pyspark.pandas.series import first_series if isinstance(self.columns, pd.MultiIndex): raise TypeError("`eval` is not supported for multi-index columns") inplace = validate_bool_kwarg(inplace, "inplace") should_return_series = False series_name = None should_return_scalar = False # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed # in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`, # and `should_return_scalar` can be updated. def eval_func(pdf): # type: ignore[no-untyped-def] nonlocal should_return_series nonlocal series_name nonlocal should_return_scalar result_inner = pdf.eval(expr, inplace=inplace) if inplace: result_inner = pdf if isinstance(result_inner, pd.Series): should_return_series = True series_name = result_inner.name result_inner = result_inner.to_frame() elif is_scalar(result_inner): should_return_scalar = True result_inner = pd.Series(result_inner).to_frame() return result_inner result = self.pandas_on_spark.apply_batch(eval_func) if inplace: # Here, the result is always a frame because the error is thrown during schema inference # from pandas. self._update_internal_frame(result._internal, check_same_anchor=False) return None elif should_return_series: return first_series(result).rename(series_name) elif should_return_scalar: return first_series(result)[0] else: # Returns a frame return result def explode(self, column: Name, ignore_index: bool = False) -> "DataFrame": """ Transform each element of a list-like to a row, replicating index values. Parameters ---------- column : str or tuple Column to explode. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. 
Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. Examples -------- >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1}) >>> df A B 0 [1, 2, 3] 1 1 [] 1 2 [3, 4] 1 >>> df.explode('A') A B 0 1.0 1 0 2.0 1 0 3.0 1 1 NaN 1 2 3.0 1 2 4.0 1 >>> df.explode('A', ignore_index=True) A B 0 1.0 1 1 2.0 1 2 3.0 1 3 NaN 1 4 3.0 1 5 4.0 1 """ from pyspark.pandas.series import Series if not is_name_like_value(column): raise TypeError("column must be a scalar") psdf: DataFrame = DataFrame(self._internal.resolved_copy) psser = psdf[column] if not isinstance(psser, Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(column) ) if not isinstance(psser.spark.data_type, ArrayType): return self.copy() sdf = psdf._internal.spark_frame.withColumn( psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column) ) data_fields = psdf._internal.data_fields.copy() idx = psdf._internal.column_labels.index(psser._column_label) field = data_fields[idx] spark_type = cast(ArrayType, field.spark_type).elementType dtype = spark_type_to_pandas_dtype(spark_type) data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True) internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields) result_df: DataFrame = DataFrame(internal) return result_df.reset_index(drop=True) if ignore_index else result_df def mad(self, axis: Axis = 0) -> "Series": """ Return the mean absolute deviation of values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. Examples -------- >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) >>> df.mad() a 0.666667 b 0.066667 dtype: float64 >>> df.mad(axis=1) 0 0.45 1 0.90 2 1.35 3 NaN dtype: float64 """ from pyspark.pandas.series import first_series axis = validate_axis(axis) if axis == 0: def get_spark_column(psdf: DataFrame, label: Label) -> Column: scol = psdf._internal.spark_column_for(label) col_type = psdf._internal.spark_type_for(label) if isinstance(col_type, BooleanType): scol = scol.cast("integer") return scol new_column_labels: List[Label] = [] for label in self._internal.column_labels: # Filtering out only columns of numeric and boolean type column. dtype = self._psser_for(label).spark.data_type if isinstance(dtype, (NumericType, BooleanType)): new_column_labels.append(label) new_columns = [ F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in new_column_labels ] mean_data = self._internal.spark_frame.select(*new_columns).first() new_columns = [ F.avg( F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)]) ).alias(name_like_string(label)) for label in new_column_labels ] sdf = self._internal.spark_frame.select( *[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns ) # The data is expected to be small so it's fine to transpose/use default index. 
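            # Worked example of the two-pass computation above (illustration only), using the
            # doctest column 'a' = [1, 2, 3, NaN]:
            #   first pass:  mean(a) = 2.0
            #   second pass: mean(|1 - 2|, |2 - 2|, |3 - 2|) = 2 / 3 ~= 0.666667
            # which matches `df.mad()` in the docstring above.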
with ps.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: @pandas_udf(returnType=DoubleType()) # type: ignore[call-overload] def calculate_columns_axis(*cols: pd.Series) -> pd.Series: return pd.concat(cols, axis=1).mad(axis=1) internal = self._internal.copy( column_labels=[None], data_spark_columns=[ calculate_columns_axis(*self._internal.data_spark_columns).alias( SPARK_DEFAULT_SERIES_NAME ) ], data_fields=[None], column_label_names=None, ) return first_series(DataFrame(internal)) def mode(self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True) -> "DataFrame": """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. .. versionadded:: 3.4.0 Parameters ---------- axis : {0 or 'index'}, default 0 Axis for the function to be applied on. numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = ps.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. Because the resulting DataFrame has two rows, the second row of ``species`` and ``legs`` contains ``NaN``. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 None NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. 
>>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 """ axis = validate_axis(axis, none_axis=0) if axis != 0: raise ValueError('axis should be either 0 or "index" currently.') if numeric_only is None and axis == 0: numeric_only = True mode_scols: List[Column] = [] mode_col_names: List[str] = [] mode_labels: List[Label] = [] for label, col_name in zip( self._internal.column_labels, self._internal.data_spark_column_names ): psser = self._psser_for(label) is_numeric = isinstance(psser.spark.data_type, (NumericType, BooleanType)) if not numeric_only or is_numeric: scol = psser.spark.column mode_scol = SF.mode(scol, dropna).alias(col_name) mode_scols.append(mode_scol) mode_col_names.append(col_name) mode_labels.append(label) # Here, after aggregation, a spark_frame looks like below: # +-------+----+----------+ # |species|legs| wings| # +-------+----+----------+ # | [bird]| [2]|[0.0, 2.0]| # +-------+----+----------+ sdf = self._internal.spark_frame.select(mode_scols) sdf = sdf.select(*[F.array_sort(F.col(name)).alias(name) for name in mode_col_names]) tmp_zip_col = "__tmp_zip_col__" tmp_explode_col = "__tmp_explode_col__" # After this transformation, sdf turns out to be: # +-------+----+-----+ # |species|legs|wings| # +-------+----+-----+ # | bird| 2| 0.0| # | null|null| 2.0| # +-------+----+-----+ sdf = ( sdf.select(F.arrays_zip(*[F.col(name) for name in mode_col_names]).alias(tmp_zip_col)) .select(F.explode(F.col(tmp_zip_col)).alias(tmp_explode_col)) .select( *[ F.col("{0}.{1}".format(tmp_explode_col, name)).alias(name) for name in mode_col_names ] ) ) sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, F.monotonically_increasing_id()) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=mode_labels, data_spark_columns=[scol_for(sdf, col) for col in mode_col_names], ) return DataFrame(internal) def tail(self, n: int = 5) -> "DataFrame": """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() # doctest: +SKIP animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) # doctest: +SKIP animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) # doctest: +SKIP animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if not isinstance(n, int): raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__)) if n < 0: n = len(self) + n if n <= 0: return ps.DataFrame(self._internal.with_filter(SF.lit(False))) # Should use `resolved_copy` here for the case like `(psdf + 1).tail()` sdf = self._internal.resolved_copy.spark_frame rows = sdf.tail(n) new_sdf = default_session().createDataFrame(rows, sdf.schema) return DataFrame(self._internal.with_new_sdf(new_sdf)) def align( self, other: DataFrameOrSeries, join: str = "outer", axis: Optional[Axis] = None, copy: bool = True, ) -> Tuple["DataFrame", DataFrameOrSeries]: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. Returns ------- (left, right) : (DataFrame, type of other) Aligned objects. Examples -------- >>> ps.set_option("compute.ops_on_diff_frames", True) >>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30]) >>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12]) Align both axis: >>> aligned_l, aligned_r = df1.align(df2) >>> aligned_l.sort_index() a b c 10 1.0 a NaN 11 NaN None NaN 12 NaN None NaN 20 2.0 b NaN 30 3.0 c NaN >>> aligned_r.sort_index() a b c 10 4.0 NaN d 11 5.0 NaN e 12 6.0 NaN f 20 NaN NaN None 30 NaN NaN None Align only axis=0 (index): >>> aligned_l, aligned_r = df1.align(df2, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() a c 10 4.0 d 11 5.0 e 12 6.0 f 20 NaN None 30 NaN None Align only axis=1 (column): >>> aligned_l, aligned_r = df1.align(df2, axis=1) >>> aligned_l.sort_index() a b c 10 1 a NaN 20 2 b NaN 30 3 c NaN >>> aligned_r.sort_index() a b c 10 4 NaN d 11 5 NaN e 12 6 NaN f Align with the join type "inner": >>> aligned_l, aligned_r = df1.align(df2, join="inner") >>> aligned_l.sort_index() a 10 1 >>> aligned_r.sort_index() a 10 4 Align with a Series: >>> s = ps.Series([7, 8, 9], index=[10, 11, 12]) >>> aligned_l, aligned_r = df1.align(s, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() 10 7.0 11 8.0 12 9.0 20 NaN 30 NaN dtype: float64 >>> ps.reset_option("compute.ops_on_diff_frames") """ from pyspark.pandas.series import Series, first_series if not isinstance(other, (DataFrame, Series)): raise TypeError("unsupported type: {}".format(type(other).__name__)) how = validate_how(join) axis = validate_axis(axis, None) right_is_series = isinstance(other, Series) if right_is_series: if axis is None: raise ValueError("Must specify axis=0 or 1") elif axis != 0: raise NotImplementedError( "align currently only works for axis=0 when right is Series" ) left = self 
right = other if (axis is None or axis == 0) and not same_anchor(left, right): combined = combine_frames(left, right, how=how) left = combined["this"] right = combined["that"] if right_is_series: right = first_series(cast(DataFrame[Any], right)).rename(other.name) if ( axis is None or axis == 1 ) and left._internal.column_labels != right._internal.column_labels: if left._internal.column_labels_level != right._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") left = left.copy() right = right.copy() if how == "full": column_labels = sorted( list(set(left._internal.column_labels) | set(right._internal.column_labels)) ) elif how == "inner": column_labels = sorted( list(set(left._internal.column_labels) & set(right._internal.column_labels)) ) elif how == "left": column_labels = left._internal.column_labels else: column_labels = right._internal.column_labels for label in column_labels: if label not in left._internal.column_labels: left[label] = SF.lit(None).cast(DoubleType()) left = left[column_labels] for label in column_labels: if label not in right._internal.column_labels: right[label] = SF.lit(None).cast(DoubleType()) right = right[column_labels] return (left.copy(), right.copy()) if copy else (left, right) @staticmethod def from_dict( data: Dict[Name, Sequence[Any]], orient: str = "columns", dtype: Union[str, Dtype] = None, columns: Optional[List[Name]] = None, ) -> "DataFrame": """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]} >>> ps.DataFrame.from_dict(data) col_1 col_2 0 3 10 1 2 20 2 1 30 3 0 40 Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]} >>> ps.DataFrame.from_dict(data, orient='index').sort_index() 0 1 2 3 row_1 3 2 1 0 row_2 10 20 30 40 When using the 'index' orientation, the column names can be specified manually: >>> ps.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']).sort_index() A B C D row_1 3 2 1 0 row_2 10 20 30 40 """ return DataFrame( pd.DataFrame.from_dict( data, orient=orient, dtype=dtype, columns=columns # type: ignore[arg-type] ) ) # Override the `groupby` to specify the actual return type annotation. 
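    # The actual grouping logic lives in `Frame.groupby`; the override below only narrows the
    # static return type to `DataFrameGroupBy` so that, for example, `psdf.groupby("A").agg(...)`
    # resolves against DataFrame-specific groupby methods in type checkers.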
def groupby( self, by: Union[Name, "Series", List[Union[Name, "Series"]]], axis: Axis = 0, as_index: bool = True, dropna: bool = True, ) -> "DataFrameGroupBy": return cast( "DataFrameGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna) ) groupby.__doc__ = Frame.groupby.__doc__ def _build_groupby( self, by: List[Union["Series", Label]], as_index: bool, dropna: bool ) -> "DataFrameGroupBy": from pyspark.pandas.groupby import DataFrameGroupBy return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna) def resample( self, rule: str, closed: Optional[str] = None, label: Optional[str] = None, on: Optional["Series"] = None, ) -> "DataFrameResampler": """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (only support `DatetimeIndex` for now), or the caller must pass the label of a datetime-like series/index to the ``on`` keyword parameter. .. versionadded:: 3.4.0 Parameters ---------- rule : str The offset string or object representing target conversion. Currently, supported units are {'Y', 'A', 'M', 'D', 'H', 'T', 'MIN', 'S'}. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'A', 'Y' and 'M' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'A', 'Y' and 'M' which all have a default of 'right'. on : Series, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. Returns ------- DataFrameResampler See Also -------- Series.resample : Resample a Series. groupby : Group by mapping, function, label, or list of labels. """ from pyspark.pandas.indexes import DatetimeIndex from pyspark.pandas.resample import DataFrameResampler if on is None and not isinstance(self.index, DatetimeIndex): raise NotImplementedError("resample currently works only for DatetimeIndex") if on is not None and not isinstance(as_spark_type(on.dtype), TimestampType): raise NotImplementedError("`on` currently works only for TimestampType") agg_columns: List[ps.Series] = [] for column_label in self._internal.column_labels: if isinstance(self._internal.spark_type_for(column_label), (NumericType, BooleanType)): agg_columns.append(self._psser_for(column_label)) if len(agg_columns) == 0: raise ValueError("No available aggregation columns!") return DataFrameResampler( psdf=self, resamplekey=on, rule=rule, closed=closed, label=label, agg_columns=agg_columns, ) def _to_internal_pandas(self) -> pd.DataFrame: """ Return a pandas DataFrame directly from _internal to avoid overhead of copy. This method is for internal use only. 
""" return self._internal.to_pandas_frame def _get_or_create_repr_pandas_cache(self, n: int) -> Union[pd.DataFrame, pd.Series]: if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache: object.__setattr__( self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()} ) return self._repr_pandas_cache[n] def __repr__(self) -> str: max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string() pdf = cast("DataFrame", self._get_or_create_repr_pandas_cache(max_display_count)) pdf_length = len(pdf) pdf = cast("DataFrame", pdf.iloc[:max_display_count]) if pdf_length > max_display_count: repr_string = pdf.to_string(show_dimensions=True) match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format( nrows=nrows, ncols=ncols ) return REPR_PATTERN.sub(footer, repr_string) return pdf.to_string() def _repr_html_(self) -> str: max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_html(notebook=True) pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_html = pdf.to_html(show_dimensions=True, notebook=True) match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ( "\n<p>Showing only the first {rows} rows " "{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols) ) return REPR_HTML_PATTERN.sub(footer, repr_html) return pdf.to_html(notebook=True) def __getitem__(self, key: Any) -> Any: from pyspark.pandas.series import Series if key is None: raise KeyError("none key") elif isinstance(key, Series): return self.loc[key.astype(bool)] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]): # Seems like pandas Frame always uses int as positional search when slicing # with ints. return self.iloc[key] return self.loc[key] elif is_name_like_value(key): return self.loc[:, key] elif is_list_like(key): return self.loc[:, list(key)] def __setitem__(self, key: Any, value: Any) -> None: from pyspark.pandas.series import Series if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self): # Different Series or DataFrames level = self._internal.column_labels_level key = DataFrame._index_normalized_label(level, key) value = DataFrame._index_normalized_frame(level, value) def assign_columns( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label] ) -> Iterator[Tuple["Series", Label]]: assert len(key) == len(that_column_labels) # Note that here intentionally uses `zip_longest` that combine # that_columns. for k, this_label, that_label in zip_longest( key, this_column_labels, that_column_labels ): yield (psdf._psser_for(that_label), tuple(["that", *k])) if this_label is not None and this_label[1:] != k: yield (psdf._psser_for(this_label), this_label) psdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left") elif isinstance(value, list): if len(self) != len(value): raise ValueError("Length of values does not match length of index") # TODO: avoid using default index? 
            with option_context(
                "compute.default_index_type",
                "distributed-sequence",
                "compute.ops_on_diff_frames",
                True,
            ):
                psdf = self.reset_index()
                psdf[key] = ps.DataFrame(value)
                psdf = psdf.set_index(psdf.columns[: self._internal.index_level])
                psdf.index.names = self.index.names
        elif isinstance(key, list):
            assert isinstance(value, DataFrame)
            # Same DataFrames.
            field_names = value.columns
            psdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
        else:
            # Same Series.
            psdf = self._assign({key: value})

        # Since Spark 3.4, df.__setitem__ generates a new dataframe instead of operating
        # in-place to follow pandas v1.4 behavior, see also SPARK-38946.
        self._update_internal_frame(psdf._internal, anchor_force_disconnect=True)

    @staticmethod
    def _index_normalized_label(level: int, labels: Union[Name, Sequence[Name]]) -> List[Label]:
        """
        Returns a label that is normalized against the current column index level.
        For example, the key "abc" can be ("abc", "", "") if the current Frame has
        a multi-index for its columns.
        """
        if is_name_like_tuple(labels):
            labels = [labels]
        elif is_name_like_value(labels):
            labels = [(labels,)]
        else:
            labels = [k if is_name_like_tuple(k) else (k,) for k in labels]

        if any(len(label) > level for label in labels):
            raise KeyError(
                "Key length ({}) exceeds index depth ({})".format(
                    max(len(label) for label in labels), level
                )
            )
        return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels]

    @staticmethod
    def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> "DataFrame":
        """
        Returns a frame that is normalized against the current column index level.
        For example, the name in `pd.Series([...], name="abc")` can be ("abc", "", "")
        if the current DataFrame has a multi-index for its columns.
        """
        from pyspark.pandas.series import Series

        if isinstance(psser_or_psdf, Series):
            psdf = psser_or_psdf.to_frame()
        else:
            assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)
            psdf = psser_or_psdf.copy()

        psdf.columns = pd.MultiIndex.from_tuples(
            [
                tuple([name_like_string(label)] + ([""] * (level - 1)))
                for label in psdf._internal.column_labels
            ],
        )

        return psdf

    def __getattr__(self, key: str) -> Any:
        if key.startswith("__"):
            raise AttributeError(key)
        if hasattr(_MissingPandasLikeDataFrame, key):
            property_or_func = getattr(_MissingPandasLikeDataFrame, key)
            if isinstance(property_or_func, property):
                return property_or_func.fget(self)
            else:
                return partial(property_or_func, self)

        try:
            return self.loc[:, key]
        except KeyError:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (self.__class__.__name__, key)
            )

    def __setattr__(self, key: str, value: Any) -> None:
        try:
            object.__getattribute__(self, key)
            return object.__setattr__(self, key, value)
        except AttributeError:
            pass

        if (key,) in self._internal.column_labels:
            self[key] = value
        else:
            msg = "pandas-on-Spark doesn't allow columns to be created via a new attribute name"
            if is_testing():
                raise AssertionError(msg)
            else:
                warnings.warn(msg, UserWarning)

    def __len__(self) -> int:
        return self._internal.resolved_copy.spark_frame.count()

    def __dir__(self) -> Iterable[str]:
        fields = [
            f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f
        ]
        return list(super().__dir__()) + fields

    def __iter__(self) -> Iterator[Name]:
        return iter(self.columns)

    # NDArray Compat
    def __array_ufunc__(
        self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
    ) -> "DataFrame":
        # TODO: is it possible to deduplicate it with '_map_series_op'?
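        # Dispatch overview (descriptive only): for a binary ufunc over two differently-anchored
        # frames, e.g. `np.add(psdf1, psdf2)`, the first branch aligns the frames with
        # `align_diff_frames` and applies the ufunc label by label; otherwise (same anchor,
        # scalars, or a single frame as in `np.sqrt(psdf)`) the ufunc is applied column-wise.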
if all(isinstance(inp, DataFrame) for inp in inputs) and any( not same_anchor(inp, inputs[0]) for inp in inputs ): # binary only assert len(inputs) == 2 this = inputs[0] that = inputs[1] if this._internal.column_labels_level != that._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op( psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label] ) -> Iterator[Tuple["Series", Label]]: for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( ufunc( psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs ).rename(this_label), this_label, ) return align_diff_frames(apply_op, this, that, fillna=True, how="full") else: # DataFrame and Series applied = [] this = inputs[0] assert all(inp is this for inp in inputs if isinstance(inp, DataFrame)) for label in this._internal.column_labels: arguments = [] for inp in inputs: arguments.append(inp[label] if isinstance(inp, DataFrame) else inp) # both binary and unary. applied.append(ufunc(*arguments, **kwargs).rename(label)) internal = this._internal.with_new_columns(applied) return DataFrame(internal) def __class_getitem__(cls, params: Any) -> object: # This is a workaround to support variadic generic in DataFrame in Python 3.7. # See https://github.com/python/typing/issues/193 # we always wraps the given type hints by a tuple to mimic the variadic generic. return create_tuple_for_frame_type(params) def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: """ Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions. """ assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert len(lst) == 1, (sdf, lst) row = lst.iloc[0] lst2 = list(row) assert len(lst2) == len(aggs), (row, lst2) return lst2 class CachedDataFrame(DataFrame): """ Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but internally it caches the corresponding Spark DataFrame. """ def __init__(self, internal: InternalFrame, storage_level: Optional[StorageLevel] = None): if storage_level is None: object.__setattr__(self, "_cached", internal.spark_frame.cache()) elif isinstance(storage_level, StorageLevel): object.__setattr__(self, "_cached", internal.spark_frame.persist(storage_level)) else: raise TypeError( "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`" ) super().__init__(internal) def __enter__(self) -> "CachedDataFrame": return self def __exit__( self, exception_type: Optional[Type[BaseException]], exception_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> Optional[bool]: self.spark.unpersist() return None # create accessor for Spark related methods. 
spark = CachedAccessor("spark", CachedSparkFrameMethods) def _test() -> None: import os import doctest import shutil import sys import tempfile import uuid from pyspark.sql import SparkSession import pyspark.pandas.frame os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.pandas.frame.__dict__.copy() globs["ps"] = pyspark.pandas spark = ( SparkSession.builder.master("local[4]").appName("pyspark.pandas.frame tests").getOrCreate() ) db_name = "db%s" % str(uuid.uuid4()).replace("-", "") spark.sql("CREATE DATABASE %s" % db_name) globs["db"] = db_name path = tempfile.mkdtemp() globs["path"] = path (failure_count, test_count) = doctest.testmod( pyspark.pandas.frame, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE, ) shutil.rmtree(path, ignore_errors=True) spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name) spark.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
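# ---------------------------------------------------------------------------
# A minimal usage sketch of the __getitem__ / __setitem__ dispatch implemented
# above. The column names and values are made up for illustration, and it
# assumes an active Spark session, which pyspark.pandas creates on demand.
def _example_getitem_setitem() -> None:
    import pyspark.pandas as ps

    psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

    psdf["a"]  # label key -> routed to .loc[:, "a"]
    psdf[0:2]  # integer slice -> positional, routed to .iloc[0:2]
    psdf[psdf["a"] > 1]  # boolean Series -> routed to .loc[mask.astype(bool)]

    # Assigning a plain list requires a matching length; since Spark 3.4 the
    # assignment builds a new internal frame instead of mutating in place
    # (SPARK-38946), though the user-facing result is the same.
    psdf["c"] = [7, 8, 9]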
{ "content_hash": "1079224948d0aba184e244886baf74e0", "timestamp": "", "source": "github", "line_count": 13260, "max_line_length": 100, "avg_line_length": 36.13808446455505, "alnum_prop": 0.5059235252748904, "repo_name": "zero323/spark", "id": "72913bc17d3062c83aa1efa8c2dcc7a1e9c68d78", "size": "480089", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/pyspark/pandas/frame.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ANTLR", "bytes": "58123" }, { "name": "Batchfile", "bytes": "27405" }, { "name": "C", "bytes": "1493" }, { "name": "CSS", "bytes": "26338" }, { "name": "Dockerfile", "bytes": "16099" }, { "name": "HTML", "bytes": "42080" }, { "name": "HiveQL", "bytes": "1859465" }, { "name": "Java", "bytes": "4699504" }, { "name": "JavaScript", "bytes": "222842" }, { "name": "Jupyter Notebook", "bytes": "4310512" }, { "name": "Makefile", "bytes": "2379" }, { "name": "PLpgSQL", "bytes": "352609" }, { "name": "PowerShell", "bytes": "4221" }, { "name": "Python", "bytes": "7815551" }, { "name": "R", "bytes": "1286372" }, { "name": "ReScript", "bytes": "240" }, { "name": "Roff", "bytes": "31582" }, { "name": "Scala", "bytes": "43514682" }, { "name": "Shell", "bytes": "241106" }, { "name": "Thrift", "bytes": "2016" }, { "name": "q", "bytes": "111129" } ], "symlink_target": "" }
from wtforms import Form, TextField, TextAreaField, BooleanField, IntegerField, DateField, DecimalField, validators #Team Form model class TeamForm(Form): teamName = TextField('teamName') sp1Name = TextField('sp1Name') sp1Novice = BooleanField('sp1Novice') sp1ESL = BooleanField('sp1ESL') sp2Name = TextField('sp2Name') sp2Novice = BooleanField('sp2Novice') sp2ESL = BooleanField('sp2ESL') sp1Key = TextField('sp1Key') sp2Key = TextField('sp2Key') class InstRegForm(Form): name = TextField('name', [validators.Required()]) # Judge Form model class JudgeForm(Form): name = TextField('name', [validators.required()]) phone = TextField('phone', [validators.required()]) #Custom Room Status Form model class CustomStatusForm(Form): name = TextField('name', [validators.required()]) class ProfileForm(Form): name = TextField('name', [validators.required()]) institution = TextField('institution') public = BooleanField('public') email = TextField('email', [validators.email(), validators.optional()]) email_code = TextField('email_code') phone = TextField('phone') class SpeakerRecordForm(Form): tournamentName = TextField('tournamentName', [validators.required()]) startDate = DateField('startDate', [validators.required()]) teamRank = IntegerField('teamRank', [validators.required()]) speakerRank = IntegerField('speakerRank', [validators.required()]) averageSpeaks = DecimalField('averageSpeaks', [validators.required()]) champion = BooleanField('champion') finalist = BooleanField('finalist') semifinalist = BooleanField('semifinalist') quarterfinalist = BooleanField('quarterfinalist') octofinalist = BooleanField('octofinalist') doubleoctofinalist = BooleanField('doubleoctofinalist') eslChampion = BooleanField('ESLChampion') eslBreak = BooleanField('ESLBreak') eflChampion = BooleanField('EFLChampion') eflBreak = BooleanField('EFLBreak') noviceChampion = BooleanField('noviceChampion') noviceBreak = BooleanField('noviceBreak') class JudgeRecordForm(Form): tournamentName = TextField('tournamentName', [validators.required()]) startDate = DateField('startDate', [validators.required()]) chair = BooleanField('chair') broke = BooleanField('broke') outroundChair = BooleanField('outroundChair') CA = BooleanField('CA') DCA = BooleanField('DCA') equity = BooleanField('Equity') class TournamentInfoForm(Form): blurb = TextAreaField('blurb', [validators.optional()]) facebook = TextField('facebook', [validators.url(require_tld=True),validators.optional()]) homepage = TextField('homepage', [validators.url(require_tld=True),validators.optional()]) email = TextField('email', [validators.email(), validators.optional()])
{ "content_hash": "505dafa7135001d9919da271f0474f27", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 115, "avg_line_length": 35.78666666666667, "alnum_prop": 0.7529806259314457, "repo_name": "sarrionandia/tournatrack", "id": "b13adbfa92e436617c2e6251c0a7f5eb2071603b", "size": "3274", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "forms.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "280845" } ], "symlink_target": "" }
from __future__ import absolute_import import math import uproot3.interp.interp import uproot3.interp.numerical class _JaggedArrayPrep(object): def __init__(self, counts, content): self.counts = counts self.content = content def _destructive_divide(array, divisor, awkward0): if divisor == 1: pass elif divisor == 2: awkward0.numpy.right_shift(array, 1, out=array) elif divisor == 4: awkward0.numpy.right_shift(array, 2, out=array) elif divisor == 8: awkward0.numpy.right_shift(array, 3, out=array) else: awkward0.numpy.floor_divide(array, divisor, out=array) return array class asjagged(uproot3.interp.interp.Interpretation): # makes __doc__ attribute mutable before Python 3.3 __metaclass__ = type.__new__(type, "type", (uproot3.interp.interp.Interpretation.__metaclass__,), {}) def __init__(self, content, skipbytes=0): self.content = content self.skipbytes = skipbytes def __repr__(self): return "asjagged({0}{1})".format(repr(self.content), "" if self.skipbytes == 0 else ", {0}".format(self.skipbytes)) def to(self, todtype=None, todims=None, skipbytes=None): if skipbytes is None: skipbytes = self.skipbytes return asjagged(self.content.to(todtype, todims), skipbytes) @property def identifier(self): return "asjagged({0}{1})".format(self.content.identifier, "" if self.skipbytes == 0 else ",{0}".format(self.skipbytes)) @property def type(self): return self.awkward0.type.ArrayType(self.awkward0.numpy.inf, self.content.type) def empty(self): return self.awkward0.JaggedArray(self.awkward0.numpy.empty(0, dtype=self.awkward0.JaggedArray.INDEXTYPE), self.awkward0.numpy.empty(0, dtype=self.awkward0.JaggedArray.INDEXTYPE), self.content.empty()) def compatible(self, other): return isinstance(other, asjagged) and self.content.compatible(other.content) def numitems(self, numbytes, numentries): return self.content.numitems(numbytes - numentries * self.skipbytes, numentries) def source_numitems(self, source): return self.content.source_numitems(source.content) def fromroot(self, data, byteoffsets, local_entrystart, local_entrystop, keylen): if local_entrystart == local_entrystop: return self.awkward0.JaggedArray.fromoffsets([0], self.content.fromroot(data, None, local_entrystart, local_entrystop, keylen)) else: if self.skipbytes == 0: offsets = _destructive_divide(byteoffsets, self.content.itemsize, self.awkward0) starts = offsets[local_entrystart : local_entrystop ] stops = offsets[local_entrystart + 1 : local_entrystop + 1] content = self.content.fromroot(data, None, starts[0], stops[-1], keylen) return self.awkward0.JaggedArray(starts, stops, content) else: bytestarts = byteoffsets[local_entrystart : local_entrystop ] + self.skipbytes bytestops = byteoffsets[local_entrystart + 1 : local_entrystop + 1] mask = self.awkward0.numpy.zeros(len(data), dtype=self.awkward0.numpy.int8) mask[bytestarts[bytestarts < len(data)]] = 1 self.awkward0.numpy.add.at(mask, bytestops[bytestops < len(data)], -1) self.awkward0.numpy.cumsum(mask, out=mask) data = data[mask.view(self.awkward0.numpy.bool_)] content = self.content.fromroot(data, None, 0, bytestops[-1], keylen) itemsize = 1 sub = self.content while hasattr(sub, "content"): sub = sub.content if isinstance(sub, uproot3.interp.numerical.asdtype): itemsize = sub.fromdtype.itemsize if isinstance(sub, uproot3.interp.numerical.asstlbitset): itemsize = sub.numbytes + 4 counts = bytestops - bytestarts shift = math.log(itemsize, 2) if shift == round(shift): self.awkward0.numpy.right_shift(counts, int(shift), out=counts) else: self.awkward0.numpy.floor_divide(counts, itemsize, 
out=counts) offsets = self.awkward0.numpy.empty(len(counts) + 1, self.awkward0.JaggedArray.INDEXTYPE) offsets[0] = 0 self.awkward0.numpy.cumsum(counts, out=offsets[1:]) return self.awkward0.JaggedArray(offsets[:-1], offsets[1:], content) def destination(self, numitems, numentries): content = self.content.destination(numitems, numentries) counts = self.awkward0.numpy.empty(numentries, dtype=self.awkward0.JaggedArray.INDEXTYPE) return _JaggedArrayPrep(counts, content) def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop): self.content.fill(source.content, destination.content, itemstart, itemstop, entrystart, entrystop) destination.counts[entrystart:entrystop] = source.stops - source.starts def clip(self, destination, itemstart, itemstop, entrystart, entrystop): destination.content = self.content.clip(destination.content, itemstart, itemstop, entrystart, entrystop) destination.counts = destination.counts[entrystart:entrystop] return destination def finalize(self, destination, branch): content = self.content.finalize(destination.content, branch) leafcount = None if len(branch._fLeaves) == 1: leafcount = branch._fLeaves[0]._fLeafCount out = self.awkward0.Methods.maybemixin(type(content), self.awkward0.JaggedArray).fromcounts(destination.counts, content) out.leafcount = leafcount if self.debug_reading: print("reading {0}".format(repr(out))) return out
{ "content_hash": "11ceab44c783a12eba4ad21624aea01f", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 208, "avg_line_length": 45.45038167938932, "alnum_prop": 0.6424252603291905, "repo_name": "scikit-hep/uproot", "id": "72e6d1aeac15caed9ec0573f0a6534836c55bc5d", "size": "6064", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "uproot3/interp/jagged.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "15573" }, { "name": "C++", "bytes": "100" }, { "name": "Jupyter Notebook", "bytes": "270594" }, { "name": "Python", "bytes": "813073" }, { "name": "Shell", "bytes": "566" } ], "symlink_target": "" }
"""Run the first page of one benchmark for every module. Only benchmarks that have a composable measurement are included. Ideally this test would be comprehensive, however, running one page of every benchmark would run impractically long. """ import os import sys import unittest from telemetry import benchmark as benchmark_module from telemetry.core import discover from telemetry.testing import options_for_unittests from telemetry.testing import progress_reporter from benchmarks import image_decoding from benchmarks import indexeddb_perf from benchmarks import jetstream from benchmarks import kraken from benchmarks import memory from benchmarks import new_tab from benchmarks import octane from benchmarks import rasterize_and_record_micro from benchmarks import repaint from benchmarks import spaceport from benchmarks import speedometer from benchmarks import sunspider from benchmarks import text_selection def SmokeTestGenerator(benchmark): # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST. # # This smoke test dynamically tests all benchmarks. So disabling it for one # failing or flaky benchmark would disable a much wider swath of coverage # than is usally intended. Instead, if a particular benchmark is failing, # disable it in tools/perf/benchmarks/*. @benchmark_module.Disabled('chromeos') # crbug.com/351114 def BenchmarkSmokeTest(self): # Only measure a single page so that this test cycles reasonably quickly. benchmark.options['pageset_repeat'] = 1 benchmark.options['page_repeat'] = 1 class SinglePageBenchmark(benchmark): # pylint: disable=no-init def CreateStorySet(self, options): # pylint: disable=super-on-old-class story_set = super(SinglePageBenchmark, self).CreateStorySet(options) for story in story_set.stories: story.skip_waits = True story_set.stories = [story] break return story_set # Set the benchmark's default arguments. options = options_for_unittests.GetCopy() options.output_format = 'none' parser = options.CreateParser() benchmark.AddCommandLineArgs(parser) benchmark_module.AddCommandLineArgs(parser) benchmark.SetArgumentDefaults(parser) options.MergeDefaultValues(parser.get_default_values()) benchmark.ProcessCommandLineArgs(None, options) benchmark_module.ProcessCommandLineArgs(None, options) self.assertEqual(0, SinglePageBenchmark().Run(options), msg='Failed: %s' % benchmark) return BenchmarkSmokeTest # The list of benchmark modules to be excluded from our smoke tests. _BLACK_LIST_TEST_MODULES = { image_decoding, # Always fails on Mac10.9 Tests builder. indexeddb_perf, # Always fails on Win7 & Android Tests builder. new_tab, # Fails fairly often on the Linux Tests builder, crbug.com/535664 octane, # Often fails & take long time to timeout on cq bot. rasterize_and_record_micro, # Always fails on cq bot. repaint, # Often fails & takes long time to timeout on cq bot. spaceport, # Takes 451 seconds. speedometer, # Takes 101 seconds. jetstream, # Take 206 seconds. text_selection, # Always fails on cq bot. memory # Flaky on bots, crbug.com/513767 } # Some smoke benchmark tests that run quickly on desktop platform can be very # slow on Android. So we create a separate set of black list only for Android. _ANDROID_BLACK_LIST_MODULES = { kraken, # Takes 275 seconds on Android. sunspider, # Takes 163 seconds on Android. 
} def load_tests(loader, standard_tests, pattern): del loader, standard_tests, pattern # unused suite = progress_reporter.TestSuite() benchmarks_dir = os.path.dirname(__file__) top_level_dir = os.path.dirname(benchmarks_dir) # Using the default of |index_by_class_name=False| means that if a module # has multiple benchmarks, only the last one is returned. all_benchmarks = discover.DiscoverClasses( benchmarks_dir, top_level_dir, benchmark_module.Benchmark, index_by_class_name=False).values() for benchmark in all_benchmarks: if sys.modules[benchmark.__module__] in _BLACK_LIST_TEST_MODULES: continue # TODO(tonyg): Smoke doesn't work with session_restore yet. if (benchmark.Name().startswith('session_restore') or benchmark.Name().startswith('skpicture_printer')): continue if hasattr(benchmark, 'generated_profile_archive'): # We'd like to test these, but don't know how yet. continue class BenchmarkSmokeTest(unittest.TestCase): pass method = SmokeTestGenerator(benchmark) # Make sure any decorators are propagated from the original declaration. # (access to protected members) pylint: disable=protected-access # TODO(dpranke): Since we only pick the first test from every class # (above), if that test is disabled, we'll end up not running *any* # test from the class. We should probably discover all of the tests # in a class, and then throw the ones we don't need away instead. # Merge decorators. for attribute in ['_enabled_strings', '_disabled_strings']: # Do set union of attributes to eliminate duplicates. merged_attributes = getattr(method, attribute, set()).union( getattr(benchmark, attribute, set())) if merged_attributes: setattr(method, attribute, merged_attributes) # Disable some tests on android platform only. if sys.modules[benchmark.__module__] in _ANDROID_BLACK_LIST_MODULES: method._disabled_strings.add('android') # TODO(bashi): Remove once crrev.com/1266833004 is landed. if benchmark.Name() == 'memory.blink_memory_mobile': method._disabled_strings.add('android') setattr(BenchmarkSmokeTest, benchmark.Name(), method) suite.addTest(BenchmarkSmokeTest(benchmark.Name())) return suite
{ "content_hash": "115743cb77a4f8e5f5b660f25205b011", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 79, "avg_line_length": 38.254901960784316, "alnum_prop": 0.728515291303605, "repo_name": "js0701/chromium-crosswalk", "id": "3fa90291f5c08d39fd4a70e403d81ec99a275ccb", "size": "6016", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "tools/perf/benchmarks/benchmark_smoke_unittest.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
""" Factories """ import pkg_resources ENTRY_POINT = 'cabalgata.factories' def load_factory(name, directory, configuration=None): """ Load a factory and have it initialize in a particular directory :param name: the name of the plugin to load :param directory: the directory where the factory will reside :return: """ for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT): if entry_point.name == name: factory_class = entry_point.load(require=False) return factory_class(directory, configuration) raise KeyError
{ "content_hash": "e29b5797267e1e761f53235213fab2aa", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 71, "avg_line_length": 30.789473684210527, "alnum_prop": 0.6923076923076923, "repo_name": "cabalgata/cabalgata-silla-de-montar", "id": "183cda8ad7c2f772e40199f80f3041b90fa05e3b", "size": "1185", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cabalgata/silla/factories.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "23407" } ], "symlink_target": "" }
import random

import nltk


def get_features(name):
    # Simple character features: the first, second and last letters of the name.
    return {'first letter': name[0],
            'second letter': name[1],
            'last letter': name[-1],
            }


def get_label(female_file, male_file):
    # Read one name per line from each file and label it 'female' or 'male'.
    female_names = []
    with open(female_file, 'r') as f:
        for line in f:
            female_names.append(line.lower().replace('\n', ''))
    male_names = []
    with open(male_file, 'r') as f:
        for line in f:
            male_names.append(line.lower().replace('\n', ''))

    return [(name, 'female') for name in female_names] \
           + [(name, 'male') for name in male_names]


def name(name):
    label = get_label('female_name.txt', 'male_names.txt')
    random.shuffle(label)
    featuresets = [(get_features(n), gender) for (n, gender) in label]
    train_set, test_set = featuresets[500:], featuresets[:500]
    train(train_set, test_set, name.lower())


def surname(surname):
    label = get_label('female_surnames', 'male_surnames')
    random.shuffle(label)
    featuresets = [(get_features(n), gender) for (n, gender) in label]
    train_set, test_set = featuresets[150:], featuresets[:150]
    train(train_set, test_set, surname.lower())


def train(train_set, test_set, name):
    # Train a naive Bayes classifier, classify the given name and report accuracy.
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print("\n%s is classified as %s" % (name, classifier.classify(get_features(name))))
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(10)

if __name__ == '__main__':
    name('Даша')
    surname('Семенчюк')
{ "content_hash": "43c724333070e9a71f46a8801cbab2c3", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 87, "avg_line_length": 33.644444444444446, "alnum_prop": 0.6149273447820344, "repo_name": "Korotkikh/TwitterSentimentAnalysis", "id": "60bdddb54d319aab523e3112c7c79ba1b53b6928", "size": "1540", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/gender_classification/gender_class.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "10326" } ], "symlink_target": "" }
import unittest import datetime import operator import pytz import recurly recurly.API_KEY = 'blah' import mocurly.core import mocurly.backend class TestAccount(unittest.TestCase): def setUp(self): self.mocurly_ = mocurly.core.mocurly() self.mocurly_.start() self.base_account_data = { 'account_code': 'blah', 'email': '[email protected]', 'first_name': 'Foo', 'last_name': 'Bar' } self.base_address_data = { 'address1': '123 Jackson St.', 'address2': 'Data City', 'state': 'CA', 'zip': '94105', 'country': 'USA' } self.base_billing_info_data = { 'first_name': 'Foo', 'last_name': 'Bar' } def tearDown(self): self.mocurly_.stop() def test_no_account_retrieve(self): self.assertRaises(recurly.NotFoundError, recurly.Account.get, '1234') def test_simple_account_creation(self): self.assertFalse(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) recurly.Account(**self.base_account_data).save() # Verify account object exists in backend self.assertTrue(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) new_account = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): self.assertEqual(new_account[k], v) self.assertTrue('hosted_login_token' in new_account) # adds a hosted_login_token by default self.assertTrue('created_at' in new_account) # adds a created_at field by default # Verify account has no billing info recurly_account = recurly.Account.get(self.base_account_data['account_code']) self.assertRaises(AttributeError, lambda: recurly_account.billing_info) def test_account_creation_with_address(self): self.assertFalse(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) self.base_account_data['address'] = recurly.Address(**self.base_address_data) new_account = recurly.Account(**self.base_account_data) new_account.save() # Verify account object exists in backend self.assertTrue(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) new_account = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): if k == 'address': address = new_account[k] for address_k, address_v in self.base_address_data.items(): self.assertEqual(address[address_k], address_v) else: self.assertEqual(new_account[k], v) self.assertTrue('hosted_login_token' in new_account) # adds a hosted_login_token by default self.assertTrue('created_at' in new_account) # adds a created_at field by default def test_account_creation_with_billing_info(self): self.assertFalse(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) self.assertFalse(mocurly.backend.billing_info_backend.has_object(self.base_account_data['account_code'])) self.base_account_data['billing_info'] = recurly.BillingInfo(**self.base_billing_info_data) new_account = recurly.Account(**self.base_account_data) new_account.save() del self.base_account_data['billing_info'] # Verify account object exists in backend self.assertTrue(mocurly.backend.accounts_backend.has_object(self.base_account_data['account_code'])) new_account = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): self.assertEqual(new_account[k], v) self.assertTrue('hosted_login_token' in new_account) # adds a hosted_login_token by default self.assertTrue('created_at' in new_account) # adds a created_at field by default # Verify billing info 
object exists in backend self.assertTrue(mocurly.backend.billing_info_backend.has_object(self.base_account_data['account_code'])) new_billing_info = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) for k, v in self.base_billing_info_data.items(): self.assertEqual(new_billing_info[k], v) def test_simple_get_account(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) account = recurly.Account.get(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): if k in ['uuid', 'uris']: continue # skip if k == 'created_at': self.assertEqual(getattr(account, k), datetime.datetime(2014, 8, 11, 0, 0, tzinfo=pytz.utc)) else: self.assertEqual(getattr(account, k), v) def test_simple_account_update_billing_info(self): # Create a simple account recurly.Account(**self.base_account_data).save() # Verify account has no billing info recurly_account = recurly.Account.get(self.base_account_data['account_code']) self.assertRaises(AttributeError, lambda: recurly_account.billing_info) # Update the billing info using the update_billing_info method billing_info = recurly.BillingInfo(**self.base_billing_info_data) recurly_account.update_billing_info(billing_info) # Verify billing info object exists in backend self.assertTrue(mocurly.backend.billing_info_backend.has_object(self.base_account_data['account_code'])) new_billing_info = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) for k, v in self.base_billing_info_data.items(): self.assertEqual(new_billing_info[k], v) def test_delete_billing_info(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_billing_info_data['account'] = self.base_account_data['account_code'] mocurly.backend.billing_info_backend.add_object(self.base_account_data['account_code'], self.base_billing_info_data) self.assertEqual(len(mocurly.backend.accounts_backend.datastore), 1) self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 1) recurly.Account.get(self.base_account_data['account_code']).billing_info.delete() self.assertEqual(len(mocurly.backend.accounts_backend.datastore), 1) self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 0) def test_close(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_billing_info_data['account'] = self.base_account_data['account_code'] mocurly.backend.billing_info_backend.add_object(self.base_account_data['account_code'], self.base_billing_info_data) account = recurly.Account.get(self.base_account_data['account_code']) account.delete() self.assertEqual(len(mocurly.backend.accounts_backend.datastore), 1) # only marks account as closed, but... 
self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 0) # billing info should be deleted account = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) self.assertEqual(account['state'], 'closed') def test_address_get_account(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' self.base_account_data['address'] = self.base_address_data mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) account = recurly.Account.get(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): if k in ['uuid', 'uris']: continue # skip if k == 'created_at': self.assertEqual(getattr(account, k), datetime.datetime(2014, 8, 11, 0, 0, tzinfo=pytz.utc)) elif k == 'address': address = getattr(account, k) self.assertEqual(type(address), recurly.Address) for address_k, address_v in v.items(): self.assertEqual(getattr(address, address_k), address_v) else: self.assertEqual(getattr(account, k), v) def test_billing_info_get_account(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_billing_info_data['account'] = self.base_account_data['account_code'] mocurly.backend.billing_info_backend.add_object(self.base_account_data['account_code'], self.base_billing_info_data) account = recurly.Account.get(self.base_account_data['account_code']) for k, v in self.base_account_data.items(): if k in ['uuid', 'uris']: continue # skip if k == 'created_at': self.assertEqual(getattr(account, k), datetime.datetime(2014, 8, 11, 0, 0, tzinfo=pytz.utc)) else: self.assertEqual(getattr(account, k), v) billing_info = account.billing_info for k, v in self.base_billing_info_data.items(): if k in ['uuid', 'uris', 'account']: continue # skip self.assertEqual(getattr(billing_info, k), v) def test_update_creditcard_billing_info(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_billing_info_data['account'] = self.base_account_data['account_code'] mocurly.backend.billing_info_backend.add_object(self.base_account_data['account_code'], self.base_billing_info_data) account = recurly.Account.get(self.base_account_data['account_code']) billing_info = account.billing_info billing_info.first_name = 'Verena' billing_info.last_name = 'Example' billing_info.number = '4111-1111-1111-1111' billing_info.verification_value = '123' billing_info.month = 11 billing_info.year = 2015 billing_info.save() self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 1) billing_info_backed = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) self.assertEqual(billing_info_backed['first_name'], 'Verena') self.assertEqual(billing_info_backed['last_name'], 'Example') self.assertEqual(billing_info_backed['number'], '4111-1111-1111-1111') self.assertEqual(billing_info_backed['first_six'], '411111') self.assertEqual(billing_info_backed['last_four'], '1111') self.assertEqual(billing_info_backed['verification_value'], '123') self.assertEqual(billing_info_backed['month'], '11') self.assertEqual(billing_info_backed['year'], '2015') def test_update_paypal_billing_info(self): 
self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_billing_info_data['account'] = self.base_account_data['account_code'] mocurly.backend.billing_info_backend.add_object(self.base_account_data['account_code'], self.base_billing_info_data) account = recurly.Account.get(self.base_account_data['account_code']) billing_info = account.billing_info billing_info.first_name = 'Verena' billing_info.last_name = 'Example' billing_info.paypal_billing_agreement_id = 'PP-7594' billing_info.save() self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 1) billing_info_backed = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) self.assertEqual(billing_info_backed['first_name'], 'Verena') self.assertEqual(billing_info_backed['last_name'], 'Example') self.assertEqual(billing_info_backed['paypal_billing_agreement_id'], 'PP-7594') def test_update_account_with_billing_info(self): # Case 1: account exists, but has no billing data self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) account = recurly.Account.get(self.base_account_data['account_code']) account.company_name = 'Mocurly' account.billing_info = billing_info = recurly.BillingInfo() billing_info.first_name = 'Verena' billing_info.last_name = 'Example' billing_info.number = '4111-1111-1111-1111' billing_info.verification_value = '123' billing_info.month = 11 billing_info.year = 2015 account.save() self.assertEqual(len(mocurly.backend.accounts_backend.datastore), 1) self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 1) account_backed = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) self.assertEqual(account_backed['company_name'], 'Mocurly') billing_info_backed = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) self.assertEqual(billing_info_backed['first_name'], 'Verena') self.assertEqual(billing_info_backed['last_name'], 'Example') self.assertEqual(billing_info_backed['number'], '4111-1111-1111-1111') self.assertEqual(billing_info_backed['first_six'], '411111') self.assertEqual(billing_info_backed['last_four'], '1111') self.assertEqual(billing_info_backed['verification_value'], '123') self.assertEqual(billing_info_backed['month'], '11') self.assertEqual(billing_info_backed['year'], '2015') # Case 2: billing data exists account = recurly.Account.get(self.base_account_data['account_code']) account.email = '[email protected]' account.billing_info = billing_info = recurly.BillingInfo() billing_info.last_name = 'Mocurly' account.save() self.assertEqual(len(mocurly.backend.accounts_backend.datastore), 1) self.assertEqual(len(mocurly.backend.billing_info_backend.datastore), 1) account_backed = mocurly.backend.accounts_backend.get_object(self.base_account_data['account_code']) self.assertEqual(account_backed['email'], '[email protected]') billing_info_backed = mocurly.backend.billing_info_backend.get_object(self.base_account_data['account_code']) self.assertEqual(billing_info_backed['last_name'], 'Mocurly') def test_list_account(self): self.base_account_data['hosted_login_token'] = 'abcd1234' self.base_account_data['created_at'] = '2014-08-11' 
mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_account_data['account_code'] = 'foo' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) self.base_account_data['account_code'] = 'bar' mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) accounts = recurly.Account.all() self.assertEqual(len(accounts), 3) self.assertEqual(set([account.account_code for account in accounts]), set(['foo', 'bar', 'blah'])) def test_invoice_list(self): mocurly.backend.accounts_backend.add_object(self.base_account_data['account_code'], self.base_account_data) base_invoice_data = { 'account': self.base_account_data['account_code'], 'uuid': 'foo', 'state': 'collected', 'invoice_number': '1234', 'subtotal_in_cents': 1000, 'currency': 'USD', 'created_at': '2014-08-11', 'net_terms': 0, 'collection_method': 'automatic', 'tax_type': 'usst', 'tax_rate': 0, 'tax_in_cents': 0, 'total_in_cents': 1000, } mocurly.backend.invoices_backend.add_object('1234', base_invoice_data) base_invoice_data['invoice_number'] = '1235' mocurly.backend.invoices_backend.add_object('1235', base_invoice_data) account = recurly.Account.get(self.base_account_data['account_code']) invoices = account.invoices() self.assertEqual(len(invoices), 2) self.assertEqual(set(map(operator.attrgetter('invoice_number'), invoices)), set([1234, 1235]))
{ "content_hash": "39935f3d3cae2ecee435bc2f1525468c", "timestamp": "", "source": "github", "line_count": 335, "max_line_length": 124, "avg_line_length": 53.38507462686567, "alnum_prop": 0.653377320509953, "repo_name": "Captricity/mocurly", "id": "fa483947f68c13519894986f1c20ba9af6252c7b", "size": "17884", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_account.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "137199" } ], "symlink_target": "" }
"""" Append list and reverse """" def append_list(): count = 10**5 nums = [] for i in range(count): nums.append(i) nums.reverse() """ In [23]: profile.run('append_list()') 200005 function calls in 3.520 CPU seconds """ """ Now implement the same using insert instead of append. Notice the difference in CPU time. """ def append_list_reverse(): count = 10**5 nums = [] for i in range(count): nums.insert(0,i) """ profile.run('append_list_reverse()') 100005 function calls in 3.719 CPU seconds """
{ "content_hash": "fdbfe87e2805417c61b798d4d3897b5b", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 89, "avg_line_length": 18.032258064516128, "alnum_prop": 0.6028622540250447, "repo_name": "manoharp/algo", "id": "301be2f30fa92331254e27080a855ecca0de7374", "size": "559", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "list_append_insert.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "12002" } ], "symlink_target": "" }
from __future__ import annotations import csv import logging import os.path from functools import wraps from html import escape from io import StringIO from json import dumps from itertools import chain from time import time from typing import TYPE_CHECKING, Optional, Any, Dict, List import gevent from flask import Flask, make_response, jsonify, render_template, request, send_file, Response from flask_basicauth import BasicAuth from gevent import pywsgi from .exception import AuthCredentialsError from .runners import MasterRunner, STATE_RUNNING, STATE_MISSING from .log import greenlet_exception_logger from .stats import StatsCSVFileWriter, StatsErrorDict, sort_stats from . import stats as stats_module, __version__ as version, argument_parser from .stats import StatsCSV from .user.inspectuser import get_ratio from .util.cache import memoize from .util.rounding import proper_round from .util.timespan import parse_timespan from .html import get_html_report from flask_cors import CORS if TYPE_CHECKING: from .env import Environment logger = logging.getLogger(__name__) greenlet_exception_handler = greenlet_exception_logger(logger) DEFAULT_CACHE_TIME = 2.0 class WebUI: """ Sets up and runs a Flask web app that can start and stop load tests using the :attr:`environment.runner <locust.env.Environment.runner>` as well as show the load test statistics in :attr:`environment.stats <locust.env.Environment.stats>` """ app: Optional[Flask] = None """ Reference to the :class:`flask.Flask` app. Can be used to add additional web routes and customize the Flask app in other various ways. Example:: from flask import request @web_ui.app.route("/my_custom_route") def my_custom_route(): return "your IP is: %s" % request.remote_addr """ greenlet: Optional[gevent.Greenlet] = None """ Greenlet of the running web server """ server: Optional[pywsgi.WSGIServer] = None """Reference to the :class:`pyqsgi.WSGIServer` instance""" template_args: Dict[str, Any] """Arguments used to render index.html for the web UI. Must be used with custom templates extending index.html.""" def __init__( self, environment: "Environment", host: str, port: int, auth_credentials: Optional[str] = None, tls_cert: Optional[str] = None, tls_key: Optional[str] = None, stats_csv_writer: Optional[StatsCSV] = None, delayed_start=False, userclass_picker_is_active=False, ): """ Create WebUI instance and start running the web server in a separate greenlet (self.greenlet) Arguments: environment: Reference to the current Locust Environment host: Host/interface that the web server should accept connections to port: Port that the web server should listen to auth_credentials: If provided, it will enable basic auth with all the routes protected by default. Should be supplied in the format: "user:pass". tls_cert: A path to a TLS certificate tls_key: A path to a TLS private key delayed_start: Whether or not to delay starting web UI until `start()` is called. Delaying web UI start allows for adding Flask routes or Blueprints before accepting requests, avoiding errors. 
""" environment.web_ui = self self.stats_csv_writer = stats_csv_writer or StatsCSV(environment, stats_module.PERCENTILES_TO_REPORT) self.environment = environment self.host = host self.port = port self.tls_cert = tls_cert self.tls_key = tls_key self.userclass_picker_is_active = userclass_picker_is_active app = Flask(__name__) CORS(app) self.app = app app.jinja_env.add_extension("jinja2.ext.do") app.debug = True app.root_path = os.path.dirname(os.path.abspath(__file__)) self.app.config["BASIC_AUTH_ENABLED"] = False self.auth: Optional[BasicAuth] = None self.greenlet: Optional[gevent.Greenlet] = None self._swarm_greenlet: Optional[gevent.Greenlet] = None self.template_args = {} if auth_credentials is not None: credentials = auth_credentials.split(":") if len(credentials) == 2: self.app.config["BASIC_AUTH_USERNAME"] = credentials[0] self.app.config["BASIC_AUTH_PASSWORD"] = credentials[1] self.app.config["BASIC_AUTH_ENABLED"] = True self.auth = BasicAuth() self.auth.init_app(self.app) else: raise AuthCredentialsError( "Invalid auth_credentials. It should be a string in the following format: 'user:pass'" ) if environment.runner: self.update_template_args() if not delayed_start: self.start() @app.route("/") @self.auth_required_if_enabled def index() -> str | Response: if not environment.runner: return make_response("Error: Locust Environment does not have any runner", 500) self.update_template_args() return render_template("index.html", **self.template_args) @app.route("/swarm", methods=["POST"]) @self.auth_required_if_enabled def swarm() -> Response: assert request.method == "POST" # Loading UserClasses & ShapeClasses if Locust is running with UserClass Picker if self.userclass_picker_is_active: if not self.environment.available_user_classes: err_msg = "UserClass picker is active but there are no available UserClasses" return jsonify({"success": False, "message": err_msg, "host": environment.host}) # Getting Specified User Classes form_data_user_class_names = request.form.getlist("user_classes") # Updating UserClasses if form_data_user_class_names: user_classes = {} for user_class_name, user_class_object in self.environment.available_user_classes.items(): if user_class_name in form_data_user_class_names: user_classes[user_class_name] = user_class_object else: if self.environment.runner and self.environment.runner.state == STATE_RUNNING: # Test is already running # Using the user classes that have already been selected user_classes = { key: value for (key, value) in self.environment.available_user_classes.items() if value in self.environment.user_classes } else: # Starting test with no user class selection # Defaulting to using all available user classes user_classes = self.environment.available_user_classes self._update_user_classes(user_classes) # Updating ShapeClass if specified in WebUI Form form_data_shape_class_name = request.form.get("shape_class", "Default") if form_data_shape_class_name == "Default": self._update_shape_class(None) else: self._update_shape_class(form_data_shape_class_name) parsed_options_dict = vars(environment.parsed_options) if environment.parsed_options else {} run_time = None for key, value in request.form.items(): if key == "user_count": # if we just renamed this field to "users" we wouldn't need this user_count = int(value) elif key == "spawn_rate": spawn_rate = float(value) elif key == "host": # Replace < > to guard against XSS environment.host = str(request.form["host"]).replace("<", "").replace(">", "") elif key == "user_classes": # Set 
environment.parsed_options.user_classes to the selected user_classes parsed_options_dict[key] = request.form.getlist("user_classes") elif key == "run_time": if not value: continue try: run_time = parse_timespan(value) except ValueError: err_msg = "Valid run_time formats are : 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc." logger.error(err_msg) return jsonify({"success": False, "message": err_msg, "host": environment.host}) elif key in parsed_options_dict: # update the value in environment.parsed_options, but dont change the type. # This won't work for parameters that are None parsed_options_dict[key] = type(parsed_options_dict[key])(value) if environment.shape_class and environment.runner is not None: environment.runner.start_shape() return jsonify( {"success": True, "message": "Swarming started using shape class", "host": environment.host} ) if self._swarm_greenlet is not None: self._swarm_greenlet.kill(block=True) self._swarm_greenlet = None if environment.runner is not None: self._swarm_greenlet = gevent.spawn(environment.runner.start, user_count, spawn_rate) self._swarm_greenlet.link_exception(greenlet_exception_handler) response_data = { "success": True, "message": "Swarming started", "host": environment.host, } if run_time: gevent.spawn_later(run_time, self._stop_runners).link_exception(greenlet_exception_handler) response_data["run_time"] = run_time if self.userclass_picker_is_active: response_data["user_classes"] = sorted(user_classes.keys()) return jsonify(response_data) else: return jsonify({"success": False, "message": "No runner", "host": environment.host}) @app.route("/stop") @self.auth_required_if_enabled def stop() -> Response: if self._swarm_greenlet is not None: self._swarm_greenlet.kill(block=True) self._swarm_greenlet = None if environment.runner is not None: environment.runner.stop() return jsonify({"success": True, "message": "Test stopped"}) @app.route("/stats/reset") @self.auth_required_if_enabled def reset_stats() -> str: environment.events.reset_stats.fire() if environment.runner is not None: environment.runner.stats.reset_all() environment.runner.exceptions = {} return "ok" @app.route("/stats/report") @self.auth_required_if_enabled def stats_report() -> Response: res = get_html_report(self.environment, show_download_link=not request.args.get("download")) if request.args.get("download"): res = app.make_response(res) res.headers["Content-Disposition"] = f"attachment;filename=report_{time()}.html" return res def _download_csv_suggest_file_name(suggest_filename_prefix: str) -> str: """Generate csv file download attachment filename suggestion. Arguments: suggest_filename_prefix: Prefix of the filename to suggest for saving the download. Will be appended with timestamp. """ return f"{suggest_filename_prefix}_{time()}.csv" def _download_csv_response(csv_data: str, filename_prefix: str) -> Response: """Generate csv file download response with 'csv_data'. Arguments: csv_data: CSV header and data rows. filename_prefix: Prefix of the filename to suggest for saving the download. Will be appended with timestamp. 
""" response = make_response(csv_data) response.headers["Content-type"] = "text/csv" response.headers[ "Content-disposition" ] = f"attachment;filename={_download_csv_suggest_file_name(filename_prefix)}" return response @app.route("/stats/requests/csv") @self.auth_required_if_enabled def request_stats_csv() -> Response: data = StringIO() writer = csv.writer(data) self.stats_csv_writer.requests_csv(writer) return _download_csv_response(data.getvalue(), "requests") @app.route("/stats/requests_full_history/csv") @self.auth_required_if_enabled def request_stats_full_history_csv() -> Response: options = self.environment.parsed_options if options and options.stats_history_enabled and isinstance(self.stats_csv_writer, StatsCSVFileWriter): return send_file( os.path.abspath(self.stats_csv_writer.stats_history_file_name()), mimetype="text/csv", as_attachment=True, download_name=_download_csv_suggest_file_name("requests_full_history"), etag=True, max_age=0, conditional=True, last_modified=None, ) return make_response("Error: Server was not started with option to generate full history.", 404) @app.route("/stats/failures/csv") @self.auth_required_if_enabled def failures_stats_csv() -> Response: data = StringIO() writer = csv.writer(data) self.stats_csv_writer.failures_csv(writer) return _download_csv_response(data.getvalue(), "failures") @app.route("/stats/requests") @self.auth_required_if_enabled @memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True) def request_stats() -> Response: stats: List[Dict[str, Any]] = [] errors: List[StatsErrorDict] = [] if environment.runner is None: report = { "stats": stats, "errors": errors, "total_rps": 0.0, "fail_ratio": 0.0, "current_response_time_percentile_95": None, "current_response_time_percentile_50": None, "state": STATE_MISSING, "user_count": 0, } if isinstance(environment.runner, MasterRunner): report.update({"workers": []}) return jsonify(report) for s in chain(sort_stats(environment.runner.stats.entries), [environment.runner.stats.total]): stats.append( { "method": s.method, "name": s.name, "safe_name": escape(s.name, quote=False), "num_requests": s.num_requests, "num_failures": s.num_failures, "avg_response_time": s.avg_response_time, "min_response_time": 0 if s.min_response_time is None else proper_round(s.min_response_time), "max_response_time": proper_round(s.max_response_time), "current_rps": s.current_rps, "current_fail_per_sec": s.current_fail_per_sec, "median_response_time": s.median_response_time, "ninetieth_response_time": s.get_response_time_percentile(0.9), "ninety_ninth_response_time": s.get_response_time_percentile(0.99), "avg_content_length": s.avg_content_length, } ) for e in environment.runner.errors.values(): err_dict = e.serialize() err_dict["name"] = escape(err_dict["name"]) err_dict["error"] = escape(err_dict["error"]) errors.append(err_dict) # Truncate the total number of stats and errors displayed since a large number of rows will cause the app # to render extremely slowly. Aggregate stats should be preserved. 
truncated_stats = stats[:500] if len(stats) > 500: truncated_stats += [stats[-1]] report = {"stats": truncated_stats, "errors": errors[:500]} if stats: report["total_rps"] = stats[len(stats) - 1]["current_rps"] report["fail_ratio"] = environment.runner.stats.total.fail_ratio report[ "current_response_time_percentile_95" ] = environment.runner.stats.total.get_current_response_time_percentile(0.95) report[ "current_response_time_percentile_50" ] = environment.runner.stats.total.get_current_response_time_percentile(0.5) if isinstance(environment.runner, MasterRunner): workers = [] for worker in environment.runner.clients.values(): workers.append( { "id": worker.id, "state": worker.state, "user_count": worker.user_count, "cpu_usage": worker.cpu_usage, "memory_usage": worker.memory_usage, } ) report["workers"] = workers report["state"] = environment.runner.state report["user_count"] = environment.runner.user_count return jsonify(report) @app.route("/exceptions") @self.auth_required_if_enabled def exceptions() -> Response: return jsonify( { "exceptions": [ { "count": row["count"], "msg": escape(row["msg"]), "traceback": escape(row["traceback"]), "nodes": ", ".join(row["nodes"]), } for row in (environment.runner.exceptions.values() if environment.runner is not None else []) ] } ) @app.route("/exceptions/csv") @self.auth_required_if_enabled def exceptions_csv() -> Response: data = StringIO() writer = csv.writer(data) self.stats_csv_writer.exceptions_csv(writer) return _download_csv_response(data.getvalue(), "exceptions") @app.route("/tasks") @self.auth_required_if_enabled def tasks() -> Dict[str, Dict[str, Dict[str, float]]]: runner = self.environment.runner user_spawned: Dict[str, int] if runner is None: user_spawned = {} else: user_spawned = ( runner.reported_user_classes_count if isinstance(runner, MasterRunner) else runner.user_classes_count ) task_data = { "per_class": get_ratio(self.environment.user_classes, user_spawned, False), "total": get_ratio(self.environment.user_classes, user_spawned, True), } return task_data def start(self): self.greenlet = gevent.spawn(self.start_server) self.greenlet.link_exception(greenlet_exception_handler) def start_server(self): if self.tls_cert and self.tls_key: self.server = pywsgi.WSGIServer( (self.host, self.port), self.app, log=None, keyfile=self.tls_key, certfile=self.tls_cert ) else: self.server = pywsgi.WSGIServer((self.host, self.port), self.app, log=None) self.server.serve_forever() def stop(self): """ Stop the running web server """ self.server.stop() def auth_required_if_enabled(self, view_func): """ Decorator that can be used on custom route methods that will turn on Basic Auth authentication if the ``--web-auth`` flag is used. 
Example:: @web_ui.app.route("/my_custom_route") @web_ui.auth_required_if_enabled def my_custom_route(): return "custom response" """ @wraps(view_func) def wrapper(*args, **kwargs): if self.app.config["BASIC_AUTH_ENABLED"]: if self.auth.authenticate(): return view_func(*args, **kwargs) else: return self.auth.challenge() else: return view_func(*args, **kwargs) return wrapper def update_template_args(self): override_host_warning = False if self.environment.host: host = self.environment.host elif self.environment.runner.user_classes: all_hosts = {l.host for l in self.environment.runner.user_classes} if len(all_hosts) == 1: host = list(all_hosts)[0] else: # since we have multiple User classes with different host attributes, we'll # inform that specifying host will override the host for all User classes override_host_warning = True host = None else: host = None options = self.environment.parsed_options is_distributed = isinstance(self.environment.runner, MasterRunner) if is_distributed: worker_count = self.environment.runner.worker_count else: worker_count = 0 stats = self.environment.runner.stats extra_options = argument_parser.ui_extra_args_dict() available_user_classes = ( None if not self.environment.available_user_classes else sorted(self.environment.available_user_classes) ) available_shape_classes = ["Default"] if self.environment.available_shape_classes: available_shape_classes += sorted(self.environment.available_shape_classes.keys()) self.template_args = { "locustfile": self.environment.locustfile, "state": self.environment.runner.state, "is_distributed": is_distributed, "user_count": self.environment.runner.user_count, "version": version, "host": host, "history": stats.history if stats.num_requests > 0 else {}, "override_host_warning": override_host_warning, "num_users": options and options.num_users, "spawn_rate": options and options.spawn_rate, "worker_count": worker_count, "is_shape": self.environment.shape_class and not self.userclass_picker_is_active, "stats_history_enabled": options and options.stats_history_enabled, "tasks": dumps({}), "extra_options": extra_options, "show_userclass_picker": self.userclass_picker_is_active, "available_user_classes": available_user_classes, "available_shape_classes": available_shape_classes, } def _update_shape_class(self, shape_class_name): if shape_class_name: shape_class = self.environment.available_shape_classes[shape_class_name] else: shape_class = None # Validating ShapeClass self.environment.shape_class = shape_class self.environment._validate_shape_class_instance() def _update_user_classes(self, user_classes): self.environment.user_classes = list(user_classes.values()) # populate the locustfile which used in web ui title only if self.environment.locustfile is None: self.environment.locustfile = ",".join(self.environment.user_classes_by_name.keys()) # Validating UserClasses self.environment._remove_user_classes_with_weight_zero() self.environment._validate_user_class_name_uniqueness() def _stop_runners(self): self.environment.runner.stop()
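# ---------------------------------------------------------------------------
# A minimal sketch of constructing WebUI directly instead of letting
# Environment.create_web_ui or the locust command do it. MyUser, the target
# host, and the credentials are made-up values for the example.
def _example_standalone_web_ui() -> None:
    from locust import HttpUser, task
    from locust.env import Environment

    class MyUser(HttpUser):
        host = "https://example.invalid"

        @task
        def index(self):
            self.client.get("/")

    env = Environment(user_classes=[MyUser])
    env.create_local_runner()

    # Constructing WebUI starts the server greenlet immediately unless
    # delayed_start=True; auth_credentials switches on basic auth.
    web_ui = WebUI(env, host="127.0.0.1", port=8089, auth_credentials="user:pass")
    web_ui.greenlet.join()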
{ "content_hash": "e1f55324416ac9ac2f1548625208796b", "timestamp": "", "source": "github", "line_count": 581, "max_line_length": 128, "avg_line_length": 42.714285714285715, "alnum_prop": 0.5644920820405367, "repo_name": "locustio/locust", "id": "673317ff096c277c12fe9a04bfe96cc51b841852", "size": "24817", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "locust/web.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "733" }, { "name": "HTML", "bytes": "33145" }, { "name": "JavaScript", "bytes": "17309" }, { "name": "Makefile", "bytes": "436" }, { "name": "Python", "bytes": "914443" }, { "name": "Sass", "bytes": "10379" }, { "name": "Shell", "bytes": "3452" } ], "symlink_target": "" }
""" Display tasks in thunderbird calendar. Configuration parameters: cache_timeout: how often we refresh usage in seconds (default 120) err_exception: error message when an exception is raised (default 'error: calendar parsing failed') err_profile: error message regarding profile path and read access (default 'error: profile not readable') format: see placeholders below (default 'tasks:[{due}] current:{current}') profile_path: path to the user thunderbird profile (not optional) (default '') Format of status string placeholders: {completed} completed tasks {current} title of current running task (sorted by priority and stamp) {due} due tasks Make sure to configure profile_path in your i3status config using the full path or this module will not be able to retrieve any information from your calendar. ex: profile_path = "/home/user/.thunderbird/1yawevtp.default" @author mrt-prodz """ from sqlite3 import connect from os import access, R_OK from time import time class Py3status: # available configuration parameters cache_timeout = 120 err_exception = 'error: calendar parsing failed' err_profile = 'error: profile not readable' format = 'tasks:[{due}] current:{current}' profile_path = '' def _response(self, text, color=None): response = { 'cached_until': time() + self.cache_timeout, 'full_text': text, } if color is not None: response['color'] = color return response # return calendar data def get_calendar(self, i3s_output_list, i3s_config): _err_color = i3s_config['color_bad'] db = self.profile_path + '/calendar-data/local.sqlite' if not access(db, R_OK): return self._response(self.err_profile, _err_color) try: con = connect(db) cur = con.cursor() cur.execute('SELECT title, todo_completed FROM cal_todos ' 'ORDER BY priority DESC, todo_stamp DESC') tasks = cur.fetchall() con.close() # task[0] is the task name, task[1] is the todo_completed column duetasks = [task[0] for task in tasks if task[1] is None] due = len(duetasks) completed = len(tasks) - due current = duetasks[0] if due else '' return self._response( self.format.format( due=due, completed=completed, current=current)) except Exception: return self._response(self.err_exception, _err_color) if __name__ == "__main__": x = Py3status() config = { 'color_good': '#00FF00', 'color_degraded': '#00FFFF', 'color_bad': '#FF0000' } print(x.get_calendar([], config))
{ "content_hash": "1571f7347eb379570d5ec31c652d5383", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 76, "avg_line_length": 32.10227272727273, "alnum_prop": 0.6173451327433628, "repo_name": "alexoneill/py3status", "id": "24e2ead4fdb6a0467a2335f0232ddaeaae7adf8e", "size": "2849", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "py3status/modules/thunderbird_calendar.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "814174" } ], "symlink_target": "" }
import os

from kaira.app import App
from kaira.response import response

app = App()
app.static('/static/', os.path.join(os.path.dirname(__file__), "static"))


@app.route("/")
def hello_world(request):
    return response.html('<h1>Hello World!</h1> <p><img src="/static/photo2.png" width="400" height="300" /></p>')


if __name__ == '__main__':
    app.run(debug=True, host="0.0.0.0", port=8000)
{ "content_hash": "45804526b211e35b5652b95fe74fd236", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 114, "avg_line_length": 23.529411764705884, "alnum_prop": 0.6375, "repo_name": "mulonemartin/kaira", "id": "ea1fcae27b9f0c8d70e789a8c7d5d1890900eb0a", "size": "400", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/static.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "118899" } ], "symlink_target": "" }
""" A library of plugins which other plugins can activate by importing. Can also be activated directly By adding it as a plugin package. """
{ "content_hash": "5e4f1685c6b4a7b9eb863a41fff49b8e", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 98, "avg_line_length": 35.25, "alnum_prop": 0.7659574468085106, "repo_name": "lahwran/crow2", "id": "1878c6415ba44c81fe761eea6e2276a7fe5ba21a", "size": "141", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "crow2/lib/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "156651" } ], "symlink_target": "" }
import MySQLdb


class mysqldb():

    def __init__(self):
        self.conn = MySQLdb.connect(
            host='localhost',
            user='root',
            passwd='passwd')
        self.cursor = self.conn.cursor()
        self.conn.select_db('test')
        self.cursor.execute("SET NAMES 'utf8'")
        self.conn.commit()

    def createdb(self):
        """docstring for createdb"""
        self.cursor.execute("""create database if not exists test""")
        self.conn.select_db('test')
        self.cursor.execute(
            """create table testpy(id int, info varchar(100)) """)

    def insertdb(self):
        """docstring for insertdb"""
        value = [-1, "inserted"]
        self.cursor.execute("""insert into testpy values(%s,%s)""", value)
        values = []
        for i in range(20):
            values.append((i, '客户' + str(i)))
        self.cursor.executemany("""insert into testpy values(%s,%s)""", values)

    def delete(self, identity):
        """delete record with target id"""
        self.cursor.execute("""delete from testpy where id=%s""", (identity,))

    def select(self, page=10):
        """docstring for select"""
        self.cursor.execute("""select id, info from testpy""")
        # for testpy_id, testpy_info in self.cursor.fetchall():
        result = self.cursor.fetchmany(page)
        while result:
            for testpy_id, testpy_info in result:
                print testpy_id, " => ", testpy_info
            result = self.cursor.fetchmany(page)
            print "------------NEXT PAGE----------------"

    def __del__(self):
        self.cursor.close()
        self.conn.commit()
        self.conn.close()


def main():
    """docstring for main """
    obj = mysqldb()
    # obj.createdb()
    obj.insertdb()
    obj.delete(19)
    obj.select(5)
    del obj


if __name__ == "__main__":
    main()
{ "content_hash": "b9347422fa8fe1228ff8c85ff463906e", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 79, "avg_line_length": 30.147540983606557, "alnum_prop": 0.5486677542142468, "repo_name": "quchunguang/test", "id": "3c600640cc054031c3fab72e0344d0cb2d5961e9", "size": "1890", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "testpy/mysqldb.py", "mode": "33261", "license": "mit", "language": [ { "name": "ActionScript", "bytes": "1086" }, { "name": "Assembly", "bytes": "71339" }, { "name": "Awk", "bytes": "1033" }, { "name": "Batchfile", "bytes": "571" }, { "name": "C", "bytes": "1063602" }, { "name": "C++", "bytes": "309142" }, { "name": "CSS", "bytes": "22567" }, { "name": "CoffeeScript", "bytes": "5429" }, { "name": "Common Lisp", "bytes": "941" }, { "name": "Fortran", "bytes": "21095" }, { "name": "Gnuplot", "bytes": "11868" }, { "name": "Go", "bytes": "14507" }, { "name": "HCL", "bytes": "21381" }, { "name": "HTML", "bytes": "788820" }, { "name": "Java", "bytes": "947462" }, { "name": "JavaScript", "bytes": "11208" }, { "name": "Lex", "bytes": "8920" }, { "name": "M", "bytes": "14447" }, { "name": "M4", "bytes": "550" }, { "name": "Makefile", "bytes": "123588" }, { "name": "Mathematica", "bytes": "3808649" }, { "name": "Matlab", "bytes": "99775" }, { "name": "Objective-C", "bytes": "18954" }, { "name": "OpenEdge ABL", "bytes": "5002" }, { "name": "PHP", "bytes": "80666" }, { "name": "PLpgSQL", "bytes": "399" }, { "name": "Perl", "bytes": "350" }, { "name": "PostScript", "bytes": "9049" }, { "name": "Python", "bytes": "521668" }, { "name": "QMake", "bytes": "258" }, { "name": "R", "bytes": "67" }, { "name": "Roff", "bytes": "1331" }, { "name": "Scala", "bytes": "1467" }, { "name": "Scheme", "bytes": "68" }, { "name": "Shell", "bytes": "551111" }, { "name": "SuperCollider", "bytes": "26339" }, { "name": "TeX", "bytes": "6604" }, { "name": "Yacc", "bytes": "23335" } ], "symlink_target": "" }
""" The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]> # Andreas Mueller <[email protected]> # Olivier Grisel <[email protected]> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from .base import BaseEstimator, is_classifier, clone from .base import MetaEstimatorMixin, ChangedBehaviorWarning from .cross_validation import check_cv from .cross_validation import _fit_and_score from .externals.joblib import Parallel, delayed from .externals import six from .utils import check_random_state from .utils.random import sample_without_replacement from .utils.validation import _num_samples, indexable from .utils.metaestimators import if_delegate_has_method from .metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.grid_search import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: uses ``ParameterGrid`` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.grid_search import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d." % (grid_size, self.n_iter) + " For exhaustive searches, use GridSearchCV.") for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): params[k] = v.rvs() else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for v in p.values(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values should be a list.") if len(v) == 0: raise ValueError("Parameter values should be a non-empty " "list.") class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Notes ----- * The long-standing behavior of this method changed in version 0.16. * It no longer uses the metric provided by ``estimator.score`` if the ``scoring`` parameter was set when fitting. """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) if self.scoring is not None and hasattr(self.best_estimator_, 'score'): warnings.warn("The long-standing behavior to use the estimator's " "score function in {0}.score has changed. The " "scoring parameter is now used." 
"".format(self.__class__.__name__), ChangedBehaviorWarning) return self.scorer_(self.best_estimator_, X, y) @if_delegate_has_method(delegate='estimator') def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict(X) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate='estimator') def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate='estimator') def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(X) @if_delegate_has_method(delegate='estimator') def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" return self.best_estimator_.transform(Xt) def _fit(self, X, y, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = self.cv self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) n_samples = _num_samples(X) X, y = indexable(X, y) if y is not None: if len(y) != n_samples: raise ValueError('Target variable (y) has a different number ' 'of samples (%i) than data (X: %i samples)' % (len(y), n_samples)) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) if self.verbose > 0: if isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(len(cv), n_candidates, n_candidates * len(cv))) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )( delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv) # Out is a list of triplet: score, estimator, n_test_samples n_fits = len(out) n_folds = len(cv) scores = list() grid_scores = list() for grid_start in range(0, n_fits, n_folds): n_test_samples = 0 score = 0 all_scores = [] for this_score, this_n_test_samples, _, parameters in \ out[grid_start:grid_start + n_folds]: all_scores.append(this_score) if self.iid: this_score *= this_n_test_samples n_test_samples += this_n_test_samples score += this_score if self.iid: score /= float(n_test_samples) else: score /= float(n_folds) scores.append((score, parameters)) # TODO: shall we also store the test_fold_sizes? grid_scores.append(_CVScoreTuple( parameters, score, np.array(all_scores))) # Store the computed scores self.grid_scores_ = grid_scores # Find the best parameters by comparing on the mean validation score: # note that `sorted` is deterministic in the way it breaks ties best = sorted(grid_scores, key=lambda x: x.mean_validation_score, reverse=True)[0] self.best_params_ = best.parameters self.best_score_ = best.mean_validation_score if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best.parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. .. versionchanged:: 0.17 Upgraded to joblib 0.9.3. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, grid_search, datasets >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = grid_search.GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape=None, degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. 
Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a an hyperparameter grid. :func:`sklearn.cross_validation.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. 
A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. 
Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settins, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, sampled_params)
{ "content_hash": "39f8319105c8fce0b8ee6f54ae02fb0d", "timestamp": "", "source": "github", "line_count": 996, "max_line_length": 87, "avg_line_length": 38.31325301204819, "alnum_prop": 0.608045073375262, "repo_name": "nelango/ViralityAnalysis", "id": "6171fcec020d8666987350ac05a12373c261b8dc", "size": "38160", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "model/lib/sklearn/grid_search.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1382177" }, { "name": "Java", "bytes": "18805" }, { "name": "JavaScript", "bytes": "10958" }, { "name": "Python", "bytes": "17814735" } ], "symlink_target": "" }
import os

from setuptools import setup

# from distutils.core import setup

name = "dyconnmap"

rootdir = os.path.abspath(os.path.dirname(__file__))

packages = []
for dirname, dirnames, filenames in os.walk(name):
    if "__init__.py" in filenames:
        packages.append(dirname.replace("/", "."))

data_files = []
for extra_dirs in ("docs", "examples", "tests"):
    for dirname, dirnames, filenames in os.walk(extra_dirs):
        fileslist = []
        for filename in filenames:
            fullname = os.path.join(dirname, filename)
            fileslist.append(fullname)
        data_files.append(("share/" + name + "/" + dirname, fileslist))

setup(
    name="dyconnmap",
    version="v1.0.4",
    description="A dynamic connectome mapping module in Python",
    author="Avraam Marimpis, Stavros Dimitriadis",
    author_email="[email protected], [email protected]",
    license="BSD",
    keywords="eeg fMRI meg connectivity graphs neuroimage brain",
    url="https://github.com/makism/dyconnmap",
    python_requires="~=3.6,!=3.7,>=3.8",
    packages=packages,
    install_requires=[
        "numpy",
        "scipy",
        "networkx",
        "matplotlib",
        "statsmodels",
        "scikit-learn",
        "bctpy",
    ],
    package_dir={"dyconnmap": "dyconnmap"},
    data_files=data_files,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved",
        "Topic :: Software Development",
        "Topic :: Scientific/Engineering",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS",
    ],
)
{ "content_hash": "1460b60a5f6948de0e56ada4edc11bfd", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 71, "avg_line_length": 32.166666666666664, "alnum_prop": 0.6082901554404145, "repo_name": "makism/dyfunconn", "id": "68703b042df4baaafe54d26df3eba52f3845168b", "size": "2037", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "486840" }, { "name": "Python", "bytes": "243964" } ], "symlink_target": "" }
from __future__ import absolute_import import os import re import sys import six import time import logging import posixpath from symsynd.demangle import demangle_symbol from sentry.models import Project, EventError from sentry.plugins import Plugin2 from sentry.lang.native.symbolizer import Symbolizer, SymbolicationFailed from sentry.lang.native.utils import find_all_stacktraces, \ find_apple_crash_report_referenced_images, get_sdk_from_event, \ find_stacktrace_referenced_images, get_sdk_from_apple_system_info, \ APPLE_SDK_MAPPING from sentry.utils.native import parse_addr logger = logging.getLogger(__name__) model_re = re.compile(r'^(\S+?)\d') APP_BUNDLE_PATHS = ( '/var/containers/Bundle/Application/', '/private/var/containers/Bundle/Application/', ) SIM_PATH = '/Developer/CoreSimulator/Devices/' SIM_APP_PATH = '/Containers/Bundle/Application/' NON_APP_FRAMEWORKS = ( '/Frameworks/libswiftCore.dylib', ) SIGNAL_NAMES = { 1: 'SIGHUP', 2: 'SIGINT', 3: 'SIGQUIT', 4: 'SIGILL', 5: 'SIGTRAP', 6: 'SIGABRT', 7: 'SIGEMT', 8: 'SIGFPE', 9: 'SIGKILL', 10: 'SIGBUS', 11: 'SIGSEGV', 12: 'SIGSYS', 13: 'SIGPIPE', 14: 'SIGALRM', 15: 'SIGTERM', 16: 'SIGURG', 17: 'SIGSTOP', 18: 'SIGTSTP', 19: 'SIGCONT', 20: 'SIGCHLD', 21: 'SIGTTIN', 22: 'SIGTTOU', 24: 'SIGXCPU', 25: 'SIGXFSZ', 26: 'SIGVTALRM', 27: 'SIGPROF', 28: 'SIGWINCH', 29: 'SIGINFO', 31: 'SIGUSR2', } def append_error(data, err): data.setdefault('errors', []).append(err) def process_posix_signal(data): signal = data.get('signal', -1) signal_name = data.get('name') if signal_name is None: signal_name = SIGNAL_NAMES.get(signal) return { 'signal': signal, 'name': signal_name, 'code': data.get('code'), 'code_name': data.get('code_name'), } def exception_from_apple_error_or_diagnosis(error, diagnosis=None): rv = {} error = error or {} mechanism = {} if 'mach' in error: mechanism['mach_exception'] = error['mach'] if 'signal' in error: mechanism['posix_signal'] = process_posix_signal(error['signal']) if mechanism: mechanism.setdefault('type', 'cocoa') rv['mechanism'] = mechanism # Start by getting the error from nsexception if error: nsexception = error.get('nsexception') if nsexception: rv['type'] = nsexception['name'] if 'value' in nsexception: rv['value'] = nsexception['value'] # If we don't have an error yet, try to build one from reason and # diagnosis if 'value' not in rv: if 'reason' in error: rv['value'] = error['reason'] elif 'diagnosis' in error: rv['value'] = error['diagnosis'] elif 'mach_exception' in mechanism: rv['value'] = mechanism['mach_exception'] \ .get('exception_name') or 'Mach Exception' elif 'posix_signal' in mechanism: rv['value'] = mechanism['posix_signal'] \ .get('name') or 'Posix Signal' else: rv['value'] = 'Unknown' # Figure out a reasonable type if 'type' not in rv: if 'mach_exception' in mechanism: rv['type'] = 'MachException' elif 'posix_signal' in mechanism: rv['type'] = 'Signal' else: rv['type'] = 'Unknown' if rv: return rv def is_in_app(frame, app_uuid=None): if app_uuid is not None: frame_uuid = frame.get('uuid') if frame_uuid == app_uuid: return True fn = frame.get('package') or '' if not (fn.startswith(APP_BUNDLE_PATHS) or (SIM_PATH in fn and SIM_APP_PATH in fn)): return False if fn.endswith(NON_APP_FRAMEWORKS): return False return True def convert_stacktrace(frames, system=None, notable_addresses=None): app_uuid = None if system: app_uuid = system.get('app_uuid') if app_uuid is not None: app_uuid = app_uuid.lower() converted_frames = [] for frame in reversed(frames): fn = frame.get('filename') # We only record the offset if 
we found a symbol but we did not # find a line number. In that case it's the offset in bytes from # the beginning of the symbol. function = frame.get('symbol_name') or '<unknown>' lineno = frame.get('line') offset = None if not lineno: offset = frame['instruction_addr'] - frame['symbol_addr'] cframe = { 'abs_path': fn, 'filename': fn and posixpath.basename(fn) or None, # This can come back as `None` from the symbolizer, in which # case we need to fill something else in or we will fail # later fulfill the interface requirements which say that a # function needs to be provided. 'function': function, 'package': frame.get('object_name'), 'symbol_addr': '0x%x' % frame['symbol_addr'], 'instruction_addr': '0x%x' % frame['instruction_addr'], 'instruction_offset': offset, 'lineno': lineno, } cframe['in_app'] = is_in_app(cframe, app_uuid) converted_frames.append(cframe) if converted_frames and notable_addresses: converted_frames[-1]['vars'] = notable_addresses if converted_frames: return {'frames': converted_frames} def inject_apple_backtrace(data, frames, diagnosis=None, error=None, system=None, notable_addresses=None, thread_id=None): stacktrace = convert_stacktrace(frames, system, notable_addresses) if error or diagnosis: error = error or {} exc = exception_from_apple_error_or_diagnosis(error, diagnosis) if exc is not None: exc['stacktrace'] = stacktrace exc['thread_id'] = thread_id data['sentry.interfaces.Exception'] = {'values': [exc]} # Since we inject the exception late we need to make sure that # we set the event type to error as it would be set to # 'default' otherwise. data['type'] = 'error' return True data['sentry.interfaces.Stacktrace'] = stacktrace return False def inject_apple_device_data(data, system): contexts = data.setdefault('contexts', {}) device = contexts.setdefault('device', {}) os = contexts.setdefault('os', {}) try: os['name'] = APPLE_SDK_MAPPING[system['system_name']] except LookupError: os['name'] = system.get('system_name') or 'Generic Apple' if 'system_version' in system: os['version'] = system['system_version'] if 'os_version' in system: os['build'] = system['os_version'] if 'kernel_version' in system: os['kernel_version'] = system['kernel_version'] if 'jailbroken' in system: os['rooted'] = system['jailbroken'] if 'cpu_arch' in system: device['arch'] = system['cpu_arch'] if 'model' in system: device['model_id'] = system['model'] if 'machine' in system: device['model'] = system['machine'] match = model_re.match(system['machine']) if match is not None: device['family'] = match.group(1) def dump_crash_report(report): import json with open('/tmp/sentry-apple-crash-report-%s.json' % time.time(), 'w') as f: json.dump(report, f, indent=2) def preprocess_apple_crash_event(data): """This processes the "legacy" AppleCrashReport.""" crash_report = data['sentry.interfaces.AppleCrashReport'] if os.environ.get('SENTRY_DUMP_APPLE_CRASH_REPORT') == '1': dump_crash_report(crash_report) project = Project.objects.get_from_cache( id=data['project'], ) system = None errors = [] threads = [] crash = crash_report['crash'] crashed_thread = None threads = {} raw_threads = {} for raw_thread in crash['threads']: if raw_thread['crashed'] and raw_thread.get('backtrace'): crashed_thread = raw_thread raw_threads[raw_thread['index']] = raw_thread threads[raw_thread['index']] = { 'id': raw_thread['index'], 'name': raw_thread.get('name'), 'current': raw_thread.get('current_thread', False), 'crashed': raw_thread.get('crashed', False), } sdk_info = get_sdk_from_apple_system_info(system) referenced_images 
= find_apple_crash_report_referenced_images( crash_report['binary_images'], raw_threads.values()) sym = Symbolizer(project, crash_report['binary_images'], referenced_images=referenced_images) with sym: if crashed_thread is None: append_error(data, { 'type': EventError.NATIVE_NO_CRASHED_THREAD, }) else: system = crash_report.get('system') try: bt, errors = sym.symbolize_backtrace( crashed_thread['backtrace']['contents'], sdk_info) for error in errors: append_error(data, error) if inject_apple_backtrace(data, bt, crash.get('diagnosis'), crash.get('error'), system, crashed_thread.get('notable_addresses'), crashed_thread['index']): # We recorded an exception, so in this case we can # skip having the stacktrace. threads[crashed_thread['index']]['stacktrace'] = None except Exception: logger.exception('Failed to symbolicate') errors.append({ 'type': EventError.NATIVE_INTERNAL_FAILURE, 'error': 'The symbolicator encountered an internal failure', }) for thread in six.itervalues(threads): # If we were told to skip the stacktrace, skip it indeed if thread.get('stacktrace', Ellipsis) is None: continue raw_thread = raw_threads.get(thread['id']) if raw_thread is None or not raw_thread.get('backtrace'): continue bt, errors = sym.symbolize_backtrace( raw_thread['backtrace']['contents'], sdk_info) for error in errors: append_error(data, error) thread['stacktrace'] = convert_stacktrace( bt, system, raw_thread.get('notable_addresses')) if threads: data['threads'] = { 'values': sorted(threads.values(), key=lambda x: x['id']), } if system: inject_apple_device_data(data, system) return data def resolve_frame_symbols(data): debug_meta = data['debug_meta'] debug_images = debug_meta['images'] sdk_info = get_sdk_from_event(data) stacktraces = find_all_stacktraces(data) if not stacktraces: return project = Project.objects.get_from_cache( id=data['project'], ) errors = [] referenced_images = find_stacktrace_referenced_images( debug_images, [x[0] for x in stacktraces]) sym = Symbolizer(project, debug_images, referenced_images=referenced_images) frame = None idx = -1 def report_error(exc_type, exc_value, tb): if exc_value.is_user_fixable or exc_value.is_sdk_failure: errors.append({ 'type': EventError.NATIVE_INTERNAL_FAILURE, 'frame': frame, 'error': u'frame #%d: %s' % (idx, exc_value) }) if not exc_value.is_user_fixable: logger.debug('Failed to symbolicate', exc_info=(exc_type, exc_value, tb)) with sym: for stacktrace, container in stacktraces: store_raw = False new_frames = list(stacktrace['frames']) for idx, frame in enumerate(stacktrace['frames']): if 'image_addr' not in frame or \ 'instruction_addr' not in frame or \ 'symbol_addr' not in frame: continue try: # Construct a raw frame that is used by the symbolizer # backend. 
raw_frame = { 'object_name': frame.get('package'), 'object_addr': frame['image_addr'], 'instruction_addr': frame['instruction_addr'], 'symbol_addr': frame['symbol_addr'], } new_frame = dict(frame) try: sfrm = sym.symbolize_frame(raw_frame, sdk_info) except SymbolicationFailed: report_error(*sys.exc_info()) else: symbol = sfrm.get('symbol_name') or \ new_frame.get('function') or '<unknown>' function = demangle_symbol(symbol, simplified=True) new_frame['function'] = function # If we demangled something, store the original in the # symbol portion of the frame if function != symbol: new_frame['symbol'] = symbol new_frame['abs_path'] = sfrm.get('filename') or None if new_frame['abs_path']: new_frame['filename'] = posixpath.basename( new_frame['abs_path']) if sfrm.get('line') is not None: new_frame['lineno'] = sfrm['line'] else: new_frame['instruction_offset'] = \ parse_addr(sfrm['instruction_addr']) - \ parse_addr(sfrm['symbol_addr']) if sfrm.get('column') is not None: new_frame['colno'] = sfrm['column'] new_frame['package'] = sfrm['object_name'] \ or new_frame.get('package') new_frame['symbol_addr'] = '0x%x' % \ parse_addr(sfrm['symbol_addr']) new_frame['instruction_addr'] = '0x%x' % parse_addr( sfrm['instruction_addr']) new_frame['in_app'] = sym.is_in_app(raw_frame) if new_frame != frame: new_frames[idx] = new_frame store_raw = True except Exception: logger.exception('Failed to symbolicate') errors.append({ 'type': EventError.NATIVE_INTERNAL_FAILURE, 'error': 'The symbolicator encountered an internal failure', }) # Remember the raw stacktrace. if store_raw and container is not None: container['raw_stacktrace'] = { 'frames': stacktrace['frames'], } # Put the new frames in stacktrace['frames'] = new_frames if errors: data.setdefault('errors', []).extend(errors) return data class NativePlugin(Plugin2): can_disable = False def get_event_preprocessors(self, data, **kwargs): rv = [] if data.get('sentry.interfaces.AppleCrashReport'): rv.append(preprocess_apple_crash_event) if data.get('debug_meta'): rv.append(resolve_frame_symbols) return rv
{ "content_hash": "75efeacf6e7c5e687caa7a26a8ebc10b", "timestamp": "", "source": "github", "line_count": 466, "max_line_length": 84, "avg_line_length": 34.15450643776824, "alnum_prop": 0.5412163860266399, "repo_name": "zenefits/sentry", "id": "9287aa87947e0f28389d7e641bfc8159dfb2ae2b", "size": "15916", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/sentry/lang/native/plugin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "249557" }, { "name": "HTML", "bytes": "293019" }, { "name": "JavaScript", "bytes": "975797" }, { "name": "Lua", "bytes": "22367" }, { "name": "Makefile", "bytes": "5959" }, { "name": "Python", "bytes": "12550461" }, { "name": "Ruby", "bytes": "4026" }, { "name": "Shell", "bytes": "793" } ], "symlink_target": "" }
"""Module containing update resource class.""" from goodreads_api_client.exceptions import OauthEndpointNotImplemented from goodreads_api_client.resources.base import Resource class Update(Resource): def friends(self): raise OauthEndpointNotImplemented('update.friends')
{ "content_hash": "2627c65f43762380c2f20d9d327e08bb", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 71, "avg_line_length": 31.77777777777778, "alnum_prop": 0.7937062937062938, "repo_name": "mdzhang/goodreads-api-client-python", "id": "414889097c003c2525960c1be5b69689c28171e6", "size": "310", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "goodreads_api_client/resources/update.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1297" }, { "name": "Python", "bytes": "41280" } ], "symlink_target": "" }
import operator from django.db import models from datetime import datetime from django.db.models import Q from filebrowser.fields import FileBrowseField from tinymce import models as tinymce_models from photologue.models import ImageModel, Photo # Create your models here. SPEECH_TYPE_CHOICES = ( ('recorded', 'Recorded'), ('artificial', 'Mainly Artificial'), ) OS_CHOICES = ( ('none', 'None'), ('dedicated', 'Dedicated'), ('windows', 'Windows'), ('macintosh', 'Macintosh'), ('mobile', 'Mobile Devices'), ('other', 'Other'), ) class DraftManager(models.Manager): ''' Use this manager to get objects that have a draft field ''' def get_query_set(self): return super(DraftManager, self).get_query_set().filter(draft=False) def all_with_draft(self): return super(DraftManager, self).get_query_set() def draft_set(self): return super(DraftManager, self).get_query_set().filter(draft=True) def get(self, *args, **kwargs): ''' if a specific record was requested, return it even if it's draft ''' return self.all_with_draft().get(*args, **kwargs) def filter(self, *args, **kwargs): ''' if pk was specified as a kwarg, return even if it's draft ''' if 'pk' in kwargs: return self.all_with_draft().filter(*args, **kwargs) return self.get_query_set().filter(*args, **kwargs) class DeviceImage(models.Model): device = models.ForeignKey('Device', related_name='images') image = FileBrowseField( max_length=200, directory="/images/", extensions=['.jpg', '.jpeg', '.gif','.png','.tif','.tiff'], format='Image', blank=True, null=True ) class SoftwareImage(models.Model): software = models.ForeignKey('Software', related_name='images') image = FileBrowseField( max_length=200, directory="/images/", extensions=['.jpg', '.jpeg', '.gif','.png','.tif','.tiff'], format='Image', blank=True, null=True ) class VocabularyImage(models.Model): vocabulary = models.ForeignKey('Vocabulary', related_name='images') image = FileBrowseField( max_length=200, directory="/images/", extensions=['.jpg', '.jpeg', '.gif','.png','.tif','.tiff'], format='Image', blank=True, null=True ) class DeviceType(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(DeviceType,self).save() class Meta: ordering = ['-id'] class ScanningIndication(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(ScanningIndication,self).save() class PredictionEnhancement(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(PredictionEnhancement,self).save() class Meta: ordering = ['-id'] class AuditoryScanning(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def 
__unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(AuditoryScanning,self).save() class Meta: ordering = ['-id'] class LanguageLevel(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(LanguageLevel,self).save() class Meta: ordering = ['-id'] class OperatingSystem(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(OperatingSystem,self).save() class Meta: ordering = ['-id'] class Supplier(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=30) slug = models.SlugField(unique=True) address = tinymce_models.HTMLField() postcode = models.CharField(max_length=8, null=True, blank=True) phone_number = models.CharField(max_length=50, null=True, blank=True) fax_number = models.CharField(max_length=50, null=True, blank=True) email_address = models.EmailField(null=True, blank=True) website_url = models.CharField(max_length=255, null=True, blank=True) bhta_member = models.BooleanField() switch_access = models.BooleanField() googlemap = models.URLField(null=True, blank=True) no_longer_trading = models.BooleanField() notes = tinymce_models.HTMLField(null=True, blank=True) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Supplier,self).save() class Document(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) file = models.FileField(upload_to='docs') supplier = models.ForeignKey(Supplier, null=True, blank=True) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Document,self).save() class Access(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Access,self).save() class Meta: ordering = ['-id'] class SymbolLibrary(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(SymbolLibrary,self).save() class 
WheelchairMount(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(WheelchairMount,self).save() class Voice(models.Model): date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) def __unicode__(self): return self.name def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Voice,self).save() class VocabularyManager(DraftManager): def search(self, terms): q_objects = [] for term in terms: q_objects.append(Q(name=term)) # Start with a bare QuerySet qs = self.get_query_set() # Use operator's or_ to string together all of your Q objects. return qs.filter(reduce(operator.or_, q_objects)) class Vocabulary(models.Model): VOCABULARY_TYPE_CHOICES = ( ('topic', 'Mainly topic based'), ('syntax', 'Mainly syntax based'), ('scene', 'Visual scene based'), ) LANGUAGE_REPRESENTATION_CHOICES = ( ('text', 'Text only (letters, words, phrases)'), ('single', 'Text and single meaning symbols'), ('multi', 'Text and multi-meaning symbols'), ) date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) slug = models.SlugField(unique=True) date_uk_release = models.DateField(null=True, blank=True) draft = models.BooleanField() discontinued = models.BooleanField() update = models.BooleanField() new_features = tinymce_models.HTMLField(null=True, blank=True) short_description = tinymce_models.HTMLField() long_description = tinymce_models.HTMLField(null=True, blank=True) guide_price_gbp = models.PositiveIntegerField(null=True, blank=True) guide_price_na = models.BooleanField() guide_price_notes = tinymce_models.HTMLField(null=True, blank=True) suppliers = models.ManyToManyField(Supplier) type = models.CharField( max_length=6, choices=VOCABULARY_TYPE_CHOICES, null=True, blank=True ) language_level = models.ManyToManyField( LanguageLevel, related_name='vocabulary_language_level', null=True, blank=True ) use_of_colour_for_navigation = models.BooleanField() use_of_colour_for_grammar = models.BooleanField() switch_access = models.BooleanField() on_screen_keyboard = models.BooleanField() language_representation = models.CharField( max_length=6, choices=LANGUAGE_REPRESENTATION_CHOICES, null=True, blank=True ) prediction = models.BooleanField() documents = models.ManyToManyField( Document, related_name = 'vocabulary_documents', null = True, blank = True, ) objects = VocabularyManager() def __unicode__(self): return self.name def get_absolute_url(self): return "/vocabulary/%s/" % (self.slug) def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Vocabulary,self).save() class SoftwareManager(DraftManager): def search(self, terms): q_objects = [] for term in terms: q_objects.append(Q(name=term)) # Start with a bare QuerySet qs = self.get_query_set() # Use operator's or_ to string together all of your Q objects. 
return qs.filter(reduce(operator.or_, q_objects)) class Software(models.Model): MESSAGE_VIEW_CHOICES =( ('text', 'Text'), ('symbols', 'Text and symbols'), ) AUDITORY_SCANNING_CHOICES =( ('none', 'None'), ('beep', 'Beep'), ('switch', 'Speech prompt for switch users only'), ('keypress', 'Speech prompt on keypress'), ) date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) slug = models.SlugField(unique=True) date_uk_release = models.DateField( null=True, blank=True, editable=False ) draft = models.BooleanField() discontinued = models.BooleanField() update = models.BooleanField() new_features = tinymce_models.HTMLField(null=True, blank=True) short_description = tinymce_models.HTMLField() long_description = tinymce_models.HTMLField(null=True, blank=True) version = models.CharField(max_length=30, null=True, blank=True) guide_price_gbp = models.PositiveIntegerField(null=True, blank=True) guide_price_na = models.BooleanField() guide_price_notes = tinymce_models.HTMLField(null=True, blank=True) suppliers = models.ManyToManyField(Supplier) operating_system = models.ManyToManyField( OperatingSystem, related_name = 'software_operating_system', null = True, blank = True, ) number_supplied_voices = models.PositiveIntegerField( null=True, blank=True ) supplied_voices = models.ManyToManyField( Voice, related_name='software_supplied_voices', null=True, blank=True ) compatible_voices = models.ManyToManyField(Voice, related_name='software_compatible_voices', null=True, blank=True) access_methods = models.ManyToManyField(Access, related_name='software_access_methods', null=True, blank=True) speech_type = models.CharField(max_length=10, choices=SPEECH_TYPE_CHOICES, null=True, blank=True) number_supplied_symbol_libraries = models.PositiveIntegerField(null=True, blank=True) supplied_symbol_libraries = models.ManyToManyField(SymbolLibrary, related_name='software_supplied_symbol_libraries', null=True, blank=True) compatible_symbol_libraries = models.ManyToManyField(SymbolLibrary, related_name='software_compatible_symbol_libraries', null=True, blank=True) symbol_libraries_notes = tinymce_models.HTMLField(null=True, blank=True) number_supplied_vocabularies = models.PositiveIntegerField(null=True, blank=True) supplied_vocabularies = models.ManyToManyField(Vocabulary, related_name='software_supplied_vocabularies', null=True, blank=True) compatible_vocabularies = models.ManyToManyField(Vocabulary, related_name='software_compatible_vocabularies', null=True, blank=True) vocabularies_notes = tinymce_models.HTMLField(null=True, blank=True) multiple_users = models.BooleanField() second_language_support = models.BooleanField() message_view = models.CharField(max_length=7, choices=MESSAGE_VIEW_CHOICES, null=True, blank=True) editable_dictionary = models.BooleanField() prediction_enhancement = models.ManyToManyField(PredictionEnhancement, related_name='software_prediction_enhancement', null=True, blank=True) auditory_scanning = models.ManyToManyField(AuditoryScanning, related_name='software_auditory_scanning', null=True, blank=True) cell_magnification = models.BooleanField() environmental_control = models.BooleanField() documents = models.ManyToManyField( Document, related_name = 'software_documents', null = True, blank = True, ) objects = SoftwareManager() def __unicode__(self): return self.name def get_absolute_url(self): return "/software/%s/" % (self.slug) def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # 
Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Software,self).save() class DeviceManager(DraftManager): def search(self, terms): q_objects = [] for term in terms: q_objects.append(Q(name=term)) q_objects.append(Q(slug=term)) # Start with a bare QuerySet qs = self.get_query_set() # Use operator's or_ to string together all of your Q objects. return qs.filter(reduce(operator.or_, q_objects)) class Device(models.Model): NUMBER_MESSAGES_CHOICES = ( ('single', 'Single'), ('2-16', '2-16'), ('over-16', 'Over 16'), ) INTERNET_CAPABLE_CHOICES = ( ('no', 'No'), ('built-in', 'Yes, built in'), ('add-on', 'Yes, via add-on equipment'), ) KEYGUARD_CHOICES = ( ('no', 'No'), ('built-in', 'Yes, built in'), ('off-the-shelf', 'Yes, off-the-shelf'), ('custom-made', 'Yes, custom-made by supplier'), ) TOUCHSCREEN_CHOICES = ( ('small', 'Palmtop size'), ('medium', 'Paperback size'), ('large', 'Laptop size'), ) MOBILE_PHONE_CAPABLE_CHOICES = ( ('none', 'None'), ('text', 'Text only'), ('built-in', 'Live calling, built in'), ('aircard', 'Live calling, via AirCard'), ('bluetooth', 'Live calling, via Bluetooth'), ) WHEELCHAIR_MOUNT_CHOICES = ( ('none', 'None'), ('included', 'Included'), ('optional', 'Optional'), ) TABLE_STAND_CHOICES = ( ('none', 'None'), ('integral', 'Integral'), ('included', 'Included'), ('optional', 'Optional'), ) date_created = models.DateTimeField(editable=False) date_updated = models.DateTimeField(editable=False) name = models.CharField(max_length=100) slug = models.SlugField(unique=True) aka = models.CharField(max_length=100, null=True, blank=True) date_uk_release = models.DateField(null=True, blank=True, editable=False) draft = models.BooleanField() discontinued = models.BooleanField() update = models.BooleanField() new_features = tinymce_models.HTMLField(null=True, blank=True) device_type = models.ManyToManyField(DeviceType, related_name='device_device_type') short_description = tinymce_models.HTMLField() long_description = tinymce_models.HTMLField(null=True, blank=True) guide_price_gbp = models.PositiveIntegerField(null=True, blank=True) guide_price_na = models.BooleanField() guide_price_notes = tinymce_models.HTMLField(null=True, blank=True) access_methods = models.ManyToManyField( Access, related_name='device_access_methods', null=True, blank=True ) scanning_indication = models.ManyToManyField( ScanningIndication, related_name='device_scanning_indication', null=True, blank=True ) speech_type = models.CharField( max_length=10, choices=SPEECH_TYPE_CHOICES ) internet_capable = models.CharField( max_length = 9, choices = INTERNET_CAPABLE_CHOICES, null = True, blank = True, default = 'no', ) number_messages = models.CharField( max_length=11, choices=NUMBER_MESSAGES_CHOICES, null=True, blank=True ) max_number_messages = models.CharField( max_length=100, null=True, blank=True ) message_levels = models.BooleanField() number_message_levels = models.PositiveIntegerField(null=True, blank=True) number_supplied_vocabularies = models.PositiveIntegerField( null = True, blank = True, editable = False, ) supplied_vocabularies = models.ManyToManyField( Vocabulary, related_name = 'device_supplied_vocabularies', null = True, blank = True, editable = False, ) compatible_vocabularies = models.ManyToManyField( Vocabulary, related_name = 'device_compatible_vocabularies', null = True, blank = True, editable = False, ) vocabularies_notes = tinymce_models.HTMLField( null = True, blank = True, editable = False, ) uses_software = models.BooleanField() install_own_software = 
models.BooleanField( verbose_name='Can install own software', ) number_supplied_software = models.PositiveIntegerField( null=True, blank=True ) supplied_software = models.ManyToManyField( Software, related_name='device_supplied_software', null=True, blank=True ) compatible_software = models.ManyToManyField( Software, related_name='device_compatible_software', null=True, blank=True ) software_notes = tinymce_models.HTMLField(null=True, blank=True) number_supplied_symbol_libraries = models.PositiveIntegerField( null=True, blank=True ) supplied_symbol_libraries = models.ManyToManyField( SymbolLibrary, related_name='device_supplied_symbol_libraries', null=True, blank=True ) compatible_symbol_libraries = models.ManyToManyField( SymbolLibrary, related_name='device_compatible_symbol_libraries', null=True, blank=True ) symbol_libraries_notes = tinymce_models.HTMLField(null=True, blank=True) keyguards = models.CharField( max_length = 20, choices = KEYGUARD_CHOICES, null = True, blank = True, default = 'no', ) touchscreen = models.CharField( max_length=6, choices=TOUCHSCREEN_CHOICES, null=True, blank=True ) screen_size_cm = models.FloatField(null = True, blank = True) screen_size_na = models.BooleanField() weight_kg = models.FloatField( null=True, blank=True, help_text='Leave blank if not known', ) width_cm = models.FloatField( null=True, blank=True, help_text='Leave blank if not known', ) height_cm = models.FloatField( null=True, blank=True, help_text='Leave blank if not known', ) depth_cm = models.FloatField( null=True, blank=True, help_text='Leave blank if not known', ) environmental_control = models.BooleanField() environmental_control_notes = tinymce_models.HTMLField( null=True, blank=True ) operating_system = models.ManyToManyField( OperatingSystem, related_name = 'device_operating_system', null = True, blank = True, ) mobile_phone_capable = models.CharField( max_length=9, choices=MOBILE_PHONE_CAPABLE_CHOICES, null=True, blank=True, ) battery_life = models.TextField( null = True, blank = True, ) battery_life_hours = models.FloatField( null = True, blank = True, ) wheelchair_mount = models.CharField( max_length = 8, choices = WHEELCHAIR_MOUNT_CHOICES, null = True, blank = True, default = 'none', ) compatible_wheelchair_mount = models.ManyToManyField( WheelchairMount, related_name='device_compatible_wheelchair_mount', null=True, blank=True ) table_stand = models.CharField( max_length = 8, choices = TABLE_STAND_CHOICES, null = True, blank = True, default = 'none', ) touchscreen = models.CharField( max_length = 6, choices = TOUCHSCREEN_CHOICES, null = True, blank = True, ) documents = models.ManyToManyField( Document, related_name = 'device_documents', null = True, blank = True, ) colours = tinymce_models.HTMLField(null=True, blank=True) warranty_notes = tinymce_models.HTMLField(null=True, blank=True) suppliers = models.ManyToManyField(Supplier) objects = DeviceManager() def __unicode__(self): return self.name def get_absolute_url(self): return "/device/%s/" % (self.slug) def save(self, **kwargs): if not self.id: self.date_created = datetime.now() # Edit created timestamp only if it's new entry self.date_updated = datetime.now() super(Device,self).save()
{ "content_hash": "6df6f2fc51a8061daf0419fecc4b7f59", "timestamp": "", "source": "github", "line_count": 773, "max_line_length": 147, "avg_line_length": 32.2496765847348, "alnum_prop": 0.6328372578121866, "repo_name": "ACECentre/SpeechBubblev1", "id": "23c70b5d25ae1856a434b29ba954571bde86c1d3", "size": "24929", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "speechbubble/speechbubble/voca/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "401451" }, { "name": "JavaScript", "bytes": "3184167" }, { "name": "PHP", "bytes": "49698" }, { "name": "Perl", "bytes": "35147" }, { "name": "Python", "bytes": "126815" } ], "symlink_target": "" }
class GameObject:
    def __init__(self, game, physics=None, renderable=None):
        self.game = game
        self.physics = physics          # physics component, expected to expose .body.position
        self.renderable = renderable    # renderable component used by render()

    def update(self):
        print(self.getGamePosition())

    def render(self, renderer):
        pass

    def getGamePosition(self):
        # Convert from physics coordinates (y up) to game coordinates (y down)
        # without mutating the body's position vector in place.
        position = self.physics.body.position
        return (position.x, -position.y)

    def setGamePosition(self, x, y):
        # Convert back from game coordinates to physics coordinates.
        self.physics.body.position.x = x
        self.physics.body.position.y = y * -1
{ "content_hash": "53360d503b7a3f300051b78a19202d3c", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 60, "avg_line_length": 26.285714285714285, "alnum_prop": 0.6032608695652174, "repo_name": "jadmz/pygame-box2d-template", "id": "ebbd2ad0ec3b9f1d8ffdc29f54ed2990b5f5f3c7", "size": "554", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/game_object.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7631" } ], "symlink_target": "" }
"""Confirms subscription to a topic. This is used to confirm a subscription of an HTTP(S) endpoint to a topic created on AWS Simple Notification Service (SNS). It is supposed to run on the endpoint that is being subscribed. Usage: ./confirm_subscription.py [options] """ import BaseHTTPServer import json import optparse import SimpleHTTPServer import SocketServer import ssl import sys import urllib2 from xml.etree import ElementTree class Error(Exception): pass class Defaults(object): """Default settings. """ PORT = 8080 class MessageType(object): """Represents SNS message type. """ CONFIRMATION = 'SubscriptionConfirmation' NOTIFICATION = 'Notification' class Server(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): """Use the ThreadingMixIn to handle requests in multiple threads. """ pass class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """Handles confirmation requests from AWS SNS. """ # Use HTTP/1.1 by default. The caveat is that the Content-Length # header must be specified in all responses. protocol_version = "HTTP/1.1" def do_POST(self): try: size = int(self.headers.getheader('content-length')) doc = json.loads(self.rfile.read(size)) message_type = self.headers.getheader('x-amz-sns-message-type') if MessageType.CONFIRMATION == message_type: _handle_confirmation(doc) elif MessageType.NOTIFICATION == message_type: _handle_notification(doc) else: raise Error('unsupported message type \'{0}\'' .format(message_type)) except (Error, Exception), err: self.send_response(404) self.send_header('Content-Length', '0') self.end_headers() return self.send_response(200) self.send_header('Content-Length', '0') self.end_headers() def do_GET(self): self.send_response(403) self.send_header('Content-Length', '0') self.end_headers() def _handle_confirmation(self, data): response = urllib2.urlopen(data['SubscribeURL']) xml = ElementTree.XML(response.read()) arn = xml.find('ConfirmSubscriptionResult/SubscriptionArn') print arn.text def _handle_notification(self, data): print 'Subject: \'{0}\'\nMessage: \'{1}\'\nTime: \'{2}\'' \ .format(data['Subject'], data['Message'], data['Timestamp']) def main(): parser = optparse.OptionParser('Usage: %prog [options]') parser.add_option('-p', '--port', dest='port', default=Defaults.PORT, help='The port number to listen on. This option is not required and ' 'is set to 8080 by default.') parser.add_option('-s', '--ssl', dest='ssl', action='store_true', help='Enable SSL/TLS. This option is not required.') parser.add_option('-k', '--key', dest='key', help='A private key file to be used when SSL is enabled.') parser.add_option('-c', '--cert', dest='cert', help='A certificate file to be used when SSL is enabled.') (opts, args) = parser.parse_args() if (0 != len(args) or (opts.ssl and (opts.cert is None or opts.key is None))): parser.print_help() return 1 try: server = Server(('', int(opts.port)), RequestHandler) if opts.ssl: server.socket = ssl.wrap_socket(server.socket, server_side=True, ssl_version=ssl.PROTOCOL_TLSv1, certfile=opts.cert, keyfile=opts.key) server.serve_forever() except Error, err: sys.stderr.write('[ERROR] {0}\n'.format(err)) return 1 return 0 if __name__ == '__main__': sys.exit(main())
{ "content_hash": "645960c359ab10566071c7d0eaed1d49", "timestamp": "", "source": "github", "line_count": 128, "max_line_length": 77, "avg_line_length": 30.015625, "alnum_prop": 0.6158250910983862, "repo_name": "ezhuk/aws-tools", "id": "3e84391ad35d095e76e8bc530b0c13d73a736d47", "size": "3997", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sns/confirm_subscription.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "49120" } ], "symlink_target": "" }
import csv from django.core.exceptions import PermissionDenied from django.http import HttpResponse from openslides.agenda.models import Speaker from openslides.utils.views import View class CSVExportView(View): """ View to export the lists of speakers of all agenda items as csv. """ def get(self, request, *args, **kwargs): if not request.user.has_perm('agenda.can_manage'): raise PermissionDenied response = HttpResponse() response['Content-Disposition'] = 'attachment; filename=list_of_speakers.csv;' csv_writer = csv.writer(response) csv_writer.writerow(['Item', 'Person', 'Begin Time', 'End Time']) for speaker in Speaker.objects.all().order_by('item', 'weight', 'begin_time'): try: begin_time = speaker.begin_time.strftime('%d.%m.%Y %H:%M:%S') except AttributeError: begin_time = None try: end_time = speaker.end_time.strftime('%d.%m.%Y %H:%M:%S') except AttributeError: end_time = None csv_writer.writerow([str(speaker.item), str(speaker.user), begin_time, end_time]) return response
{ "content_hash": "43f8dc287b0809233c1efa882539b6ca", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 93, "avg_line_length": 40.1, "alnum_prop": 0.6176226101413134, "repo_name": "normanjaeckel/openslides-csv-export", "id": "bef904584db54feb6e4a530f1045b9905402eb21", "size": "1203", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "openslides_csv_export/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "589" }, { "name": "Python", "bytes": "6333" } ], "symlink_target": "" }
import re re_number = re.compile(r'\d') #re_string = re.compile(r'["\'][^"\']*["\']') #"[^"]*"|'[^']*' re_string = re.compile(r'"[^"]*"|\'[^\']*\'') def regex_labels(document, l): """ Add a list of (regex, label) to the labeling of document """ for regex, label in l: matches = regex.finditer(document.text) for match in matches: document.highlighting.highlight((match.start(), match.end()), label)
{ "content_hash": "38cf8abd9dd75af5b362a493b0bc4ea7", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 80, "avg_line_length": 33.61538461538461, "alnum_prop": 0.562929061784897, "repo_name": "Chiel92/fate", "id": "f7b5303f76f23581a70a6ca04299d64c49164c3e", "size": "437", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fate/highlighting/common.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "160466" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models from django.contrib.auth.models import User # Create your models here. class Blog(models.Model): STATUS_CHOICES = ( ('d', "draft"), ('p', "published"), ) title = models.CharField("Title", max_length=128, db_index=True, ) abstract = models.CharField("Abstract", max_length=128, default="") content = models.TextField("Content", ) author = models.ForeignKey(User, verbose_name="author") tags = models.ManyToManyField("tag", blank=True) isShown = models.BooleanField("Visible", default=True) createTime = models.DateTimeField("Create Time", auto_now_add=True) updateTime = models.DateTimeField("Update Time", auto_now=True) status = models.CharField('Status', max_length=1, choices=STATUS_CHOICES, default=STATUS_CHOICES[0][0]) top = models.BooleanField("Set to Top", default=False) class Tag(models.Model): tag = models.CharField("tag", max_length=50, db_index=True, ) def __str__(self): return self.tag
{ "content_hash": "d9d79d20df53cfaeb90794d0608eaecb", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 107, "avg_line_length": 34.16129032258065, "alnum_prop": 0.6789423984891407, "repo_name": "wenxuan-xia/niv_blog", "id": "953b95357227f8a13cb8da34aaf32593d9df903f", "size": "1059", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/blog/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "86161" }, { "name": "HTML", "bytes": "9134" }, { "name": "JavaScript", "bytes": "41482" }, { "name": "Python", "bytes": "8767" } ], "symlink_target": "" }
from dnload.glsl_block import GlslBlock ######################################## # GlslBlockDefault ##################### ######################################## class GlslBlockDefault(GlslBlock): """Default 'fallback' GLSL block.""" def __init__(self, content): """Constructor.""" GlslBlock.__init__(self) self.__content = content def format(self, force): """Return formatted output.""" ret = [] for ii in self.__content: if isinstance(ii, str): ret += [ii] else: ret += [ii.format(force)] return " ".join(ret) def __str__(self): """String representation.""" return "Default(%i)" % (len(self.__content)) ######################################## # Functions ############################ ######################################## def glsl_parse_default(source): """Parse default block, will be output as-is, should never happen.""" print("WARNING: returning default GLSL block: '%s'" % (str(map(str, source)))) return [GlslBlockDefault(source)]
{ "content_hash": "3057afca6122d912609662e8dafc8f51", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 82, "avg_line_length": 30.75, "alnum_prop": 0.45709123757904246, "repo_name": "trilkk/dnload", "id": "048990f009452bea2b5a66c8655e7b10bdc96129", "size": "1107", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "dnload/glsl_block_default.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "22121" }, { "name": "C++", "bytes": "126700" }, { "name": "CMake", "bytes": "28671" }, { "name": "Python", "bytes": "128256" }, { "name": "Shell", "bytes": "2211" } ], "symlink_target": "" }
import os, argparse import tensorflow as tf # The original freeze_graph function # from tensorflow.python.tools.freeze_graph import freeze_graph dir = os.path.dirname(os.path.realpath(__file__)) def freeze_graph(model_dir, output_node_names,export_dir): """Extract the sub graph defined by the output nodes and convert all its variables into constant Args: model_dir: the root folder containing the checkpoint state file output_node_names: a string, containing all the output node's names, comma separated """ if not tf.gfile.Exists(export_dir): raise AssertionError( "Export directory doesn't exists. Please specify an export " "directory: %s" % export_dir) if not output_node_names: print("You need to supply the name of a node to --output_node_names.") return -1 # We retrieve our checkpoint fullpath checkpoint = tf.train.get_checkpoint_state(model_dir) input_checkpoint = checkpoint.model_checkpoint_path # We precise the file fullname of our freezed graph absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1]) output_graph = absolute_model_dir + "/frozen_model.pb" # We clear devices to allow TensorFlow to control on which device it will load operations clear_devices = True # We start a session using a temporary fresh Graph with tf.Session(graph=tf.Graph()) as sess: # We import the meta graph in the current default Graph saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices) # We restore the weights saver.restore(sess, input_checkpoint) #writer=tf.summary.FileWriter("./logs",sess.graph_def) # We use a built-in TF helper to export variables to constants output_graph_def = tf.graph_util.convert_variables_to_constants( sess, # The session is used to retrieve the weights tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes output_node_names.split(",") # The output node names are used to select the usefull nodes ) # Finally we serialize and dump the output graph to the filesystem with tf.gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) print("%d ops in the final graph." % len(output_graph_def.node)) return output_graph_def if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--model_dir", type=str, default="/home/lee/Downloads/Classify-HandGesturePose-master/pose2d/", help="Model folder to export") parser.add_argument("--output_node_names", type=str, default="ResizeBilinear", help="The name of the output nodes, comma separated.") args = parser.parse_args() freeze_graph(args.model_dir, args.output_node_names, "/home/lee/Downloads/Classify-HandGesturePose-master/posemodel/")
{ "content_hash": "3ba447ec4ec7be0f53cfbc88e0f2f06c", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 150, "avg_line_length": 43.970588235294116, "alnum_prop": 0.676923076923077, "repo_name": "dedoogong/asrada", "id": "867c24d5771a0815871bc1113fe14b38937dba92", "size": "2990", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "utils/ckpt2pb.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "190309" }, { "name": "Python", "bytes": "496594" }, { "name": "RenderScript", "bytes": "27079" }, { "name": "Shell", "bytes": "2226" } ], "symlink_target": "" }
from django.contrib import admin from models import Tileset class TilesetAdmin(admin.ModelAdmin): fields = ['name', 'created_by', 'server_url', 'server_service_type', 'server_username', 'server_password', 'layer_name', 'layer_zoom_start', 'layer_zoom_stop', 'geom'] list_display = ('name', 'layer_name', 'server_url', 'created_by', 'created_at') search_fields = ['name'] admin.site.register(Tileset, TilesetAdmin)
{ "content_hash": "dff6201420ae19ea098d1a3b09302838", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 171, "avg_line_length": 42.8, "alnum_prop": 0.6985981308411215, "repo_name": "ROGUE-JCTD/django-tilebundler", "id": "f1d6e330e86735717aed50c1e9fe9e59da126655", "size": "428", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tilebundler/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "26" }, { "name": "HTML", "bytes": "835" }, { "name": "Python", "bytes": "31771" } ], "symlink_target": "" }
from prolog.interpreter.parsing import parse_file, TermBuilder from prolog.interpreter.term import Atom, Number, Term, Callable, \ specialized_term_classes, NumberedVar, MutableCallable from prolog.interpreter.test.tool import parse from prolog.interpreter.heap import Heap import py def parse(inp): t = parse_file(inp) builder = TermBuilder() return builder.build(t) atom = parse('a.')[0] term = parse('t(a, b, c, d, f).')[0] def test_atom_get_signature(): r = atom.get_prolog_signature() r.name() == '/' assert r.argument_at(0).signature().string() == 'a/0' assert r.argument_at(1).num == 0 def test_atom_get_arguments(): assert atom.arguments() == [] def test_atom_arguemtn_count(): assert atom.argument_count() == 0 def test_atom_get_argument_at(): assert py.test.raises(IndexError, 'atom.argument_at(0)') def test_term_get_signature(): r = term.get_prolog_signature() print r assert r.name() == '/' r.name() == '/' assert r.argument_at(0).signature().string() == 't/0' assert r.argument_at(1).num == 5 def test_term_get_arguments(): t = term.arguments() assert isinstance(t, list) assert len(t) == 5 def test_term_get_argument_out_of_range(): py.test.raises(IndexError, 'term.argument_at(5)') def test_term_get_argument_in_range(): t = term.argument_at(2) assert t.name() == 'c' def test_term_argument_count(): assert term.argument_count() == 5 def test_callable_name(): c = Callable() py.test.raises(NotImplementedError, 'c.name()') def test_callable_signature(): c = Callable() py.test.raises(NotImplementedError, 'c.signature()') def test_atom_name(): assert atom.name() == 'a' def test_atom_signature(): assert atom.signature().string() == 'a/0' def test_term_name(): assert term.name() == 't' def test_term_signature(): assert term.signature().string() == 't/5' def test_callable_factory_for_atom(): r = Callable.build('foo') assert isinstance(r, Atom) assert r.signature().string() == 'foo/0' def test_callable_factory_for_term_with_empty_args(): r = Callable.build('bar', []) assert isinstance(r, Atom) assert r.signature().string() == 'bar/0' def test_callable_factory_for_term(): r = Callable.build('foo', [1, 2]) assert isinstance(r, Callable) assert r.signature().string() == 'foo/2' def test_callable_factory_for_cons(): r = Callable.build('.', [1, Callable.build('[]')]) assert isinstance(r, specialized_term_classes['.', 2]) assert r.signature().string() == './2' assert r.name() == '.' assert r.argument_count() == 2 assert r.arguments() == [1, Callable.build('[]')] assert r.argument_at(0) == 1 assert r.argument_at(1) == Callable.build('[]') def test_callable_mutable(): for name in [".", "f"]: t = Callable.build(name, [NumberedVar(0), NumberedVar(1)]) res = t.copy_standardize_apart(Heap(), [None, None]) assert isinstance(res, MutableCallable) res.set_argument_at(0, 1) assert res.argument_at(0) == 1 res.set_argument_at(1, 7) assert res.argument_at(0) == 1 assert res.argument_at(1) == 7
{ "content_hash": "5d63a16758cc796f98dd872b3335dc8e", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 67, "avg_line_length": 30.203703703703702, "alnum_prop": 0.6204782342121398, "repo_name": "cosmoharrigan/pyrolog", "id": "c3f80152111b0392b04e7a0fdac04108e08bc17e", "size": "3262", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "prolog/interpreter/test/test_callable_interface.py", "mode": "33188", "license": "mit", "language": [ { "name": "Prolog", "bytes": "11037" }, { "name": "Python", "bytes": "588612" } ], "symlink_target": "" }
from subprocess import check_output print(check_output(['hostname', '-I']))
{ "content_hash": "585967b4a90b669c95aa79c54efe913e", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 39, "avg_line_length": 38, "alnum_prop": 0.7368421052631579, "repo_name": "cazacov/InternetOfThings", "id": "98e6aa5c37808029db2f7bbb5b481ae6db1678e1", "size": "76", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "raspberry/IpTest/showip.py", "mode": "33261", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "24603" }, { "name": "Arduino", "bytes": "50930" }, { "name": "Batchfile", "bytes": "150" }, { "name": "C", "bytes": "171671" }, { "name": "C#", "bytes": "61299" }, { "name": "C++", "bytes": "246262" }, { "name": "CSS", "bytes": "27048" }, { "name": "HTML", "bytes": "8382" }, { "name": "Java", "bytes": "3980" }, { "name": "JavaScript", "bytes": "1042157" }, { "name": "Python", "bytes": "3970" }, { "name": "Shell", "bytes": "210" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('articles', '0011_seriespage_subtitle'), ] operations = [ migrations.RenameField( model_name='seriespage', old_name='image', new_name='main_image', ), ]
{ "content_hash": "f4d2d5c5c4d4badb2311a478e6131b79", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 49, "avg_line_length": 20.666666666666668, "alnum_prop": 0.5860215053763441, "repo_name": "albertoconnor/website", "id": "1556db0eaefbda518b91f4c0e9731298ba5e0109", "size": "396", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "articles/migrations/0012_auto_20150707_2303.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "26874" }, { "name": "HTML", "bytes": "90495" }, { "name": "JavaScript", "bytes": "13768" }, { "name": "Python", "bytes": "254428" }, { "name": "Shell", "bytes": "985" } ], "symlink_target": "" }
''' Models (mostly base classes) for the various kinds of renderer types that Bokeh supports. ''' from __future__ import absolute_import import logging logger = logging.getLogger(__name__) from ..core.enums import RenderLevel from ..core.has_props import abstract from ..core.properties import Auto, Bool, Either, Enum, Float, Instance, Override, String from ..core.validation import error from ..core.validation.errors import BAD_COLUMN_NAME, MISSING_GLYPH, NO_SOURCE_FOR_GLYPH from ..model import Model from .glyphs import Glyph from .images import ImageSource from .sources import ColumnDataSource, DataSource, RemoteSource from .tiles import TileSource, WMTSTileSource @abstract class Renderer(Model): '''An abstract base class for renderer types. ''' level = Enum(RenderLevel, help=""" Specifies the level in which to paint this renderer. """) visible = Bool(default=True, help=""" Is the renderer visible. """) @abstract class DataRenderer(Renderer): ''' An abstract base class for data renderer types (e.g. ``GlyphRenderer``, ``TileRenderer``). ''' class TileRenderer(DataRenderer): ''' ''' tile_source = Instance(TileSource, default=lambda: WMTSTileSource(), help=""" Local data source to use when rendering glyphs on the plot. """) alpha = Float(1.0, help=""" tile opacity 0.0 - 1.0 """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering glyphs on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering glyphs on the plot. If unset, use the default y-range. """) level = Override(default="underlay") render_parents = Bool(default=True, help=""" Flag enable/disable drawing of parent tiles while waiting for new tiles to arrive. Default value is True. """) class DynamicImageRenderer(DataRenderer): ''' ''' image_source = Instance(ImageSource, help=""" Image source to use when rendering on the plot. """) alpha = Float(1.0, help=""" tile opacity 0.0 - 1.0 """) level = Override(default="underlay") render_parents = Bool(default=True, help=""" Flag enable/disable drawing of parent tiles while waiting for new tiles to arrive. Default value is True. """) class GlyphRenderer(DataRenderer): ''' ''' @error(MISSING_GLYPH) def _check_missing_glyph(self): if not self.glyph: return str(self) @error(NO_SOURCE_FOR_GLYPH) def _check_no_source_for_glyph(self): if not self.data_source: return str(self) @error(BAD_COLUMN_NAME) def _check_bad_column_name(self): if not self.glyph: return if not self.data_source: return if isinstance(self.data_source, RemoteSource): return missing = set() specs = self.glyph.dataspecs() for name, item in self.glyph.properties_with_values(include_defaults=False).items(): if name not in specs: continue if not isinstance(item, dict): continue if not isinstance(self.data_source, ColumnDataSource): continue if 'field' in item and item['field'] not in self.data_source.column_names: missing.add(item['field']) if missing: return "%s [renderer: %s]" % (", ".join(sorted(missing)), self) data_source = Instance(DataSource, help=""" Local data source to use when rendering glyphs on the plot. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering glyphs on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering glyphs on the plot. 
    If unset, use the default y-range.
    """)

    glyph = Instance(Glyph, help="""
    The glyph to render, in conjunction with the supplied data source
    and ranges.
    """)

    selection_glyph = Either(Auto, Instance(Glyph), default="auto", help="""
    An optional glyph used for selected points.

    If set to "auto" then the standard glyph will be used for selected
    points.
    """)

    nonselection_glyph = Either(Auto, Instance(Glyph), default="auto", help="""
    An optional glyph used for explicitly non-selected points
    (i.e., non-selected when there are other points that are selected,
    but not when no points at all are selected.)

    If set to "auto" then a glyph with a low alpha value (0.1) will
    be used for non-selected points.
    """)

    hover_glyph = Instance(Glyph, help="""
    An optional glyph used for inspected points, e.g., those that are
    being hovered over by a HoverTool.
    """)

    muted_glyph = Instance(Glyph, help="""
    """)

    muted = Bool(False, help="""
    """)

    level = Override(default="glyph")


@abstract
class GuideRenderer(Renderer):
    ''' A base class for all guide renderer types. ``GuideRenderer`` is
    not generally useful to instantiate on its own.

    '''

    plot = Instance(".models.plots.Plot", help="""
    The plot to which this guide renderer is attached.
    """)

    def __init__(self, **kwargs):
        super(GuideRenderer, self).__init__(**kwargs)

        if self.plot is not None:
            if self not in self.plot.renderers:
                self.plot.renderers.append(self)

    level = Override(default="overlay")
{ "content_hash": "e49d0c80eb7c63f65227c728f511891a", "timestamp": "", "source": "github", "line_count": 189, "max_line_length": 109, "avg_line_length": 29.67195767195767, "alnum_prop": 0.6551355206847361, "repo_name": "schoolie/bokeh", "id": "6021c457ba2db03147c8b49ae0dc1774c5e413b8", "size": "5608", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "bokeh/models/renderers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1442" }, { "name": "CSS", "bytes": "92841" }, { "name": "CoffeeScript", "bytes": "1132562" }, { "name": "HTML", "bytes": "47972" }, { "name": "JavaScript", "bytes": "25865" }, { "name": "Jupyter Notebook", "bytes": "3981" }, { "name": "Makefile", "bytes": "1164" }, { "name": "Python", "bytes": "2426955" }, { "name": "Shell", "bytes": "3718" }, { "name": "TypeScript", "bytes": "130084" } ], "symlink_target": "" }
import spectral as sp import os import numpy as np import Classifier_SAM as SAM from glob import glob testOxi = 1 normalize_button = 0 # do nothing to testing data SP array. def dataProcess_alg_pass(SP): return SP #exclude the background pixel, into an array(spectrum) and return T/F, True: background; False: not a background def exclude_BG(pixel_array): if sum(pixel_array) == 0: return True else: return False #cal the average spectrum of a img. Input a img and return an array (the same format as Spectral lib's ) def cal_avg_SP(img): width, height, deepth = img.shape sum_SP = 0 count = 0 for i in range(width): for j in range(height): pixel_SP = img[i,j] if exclude_BG(pixel_SP): continue else: sum_SP += pixel_SP count += 1 return sum_SP / count # input testing data type, input index number and file type. (both manually or UI). This could input one data file and return img_testing def input_testing_data(index = '10',type = 'oxido', check_all = 0): filePath = 'data/' if not check_all : num = input("input the index number of your testing oxido file(01-41)\n") type = input("input the file type you want to test, oxido/ sulfuro or 1/2\n") if type == 'oxido': fileName_testing = 'oxidos/EscOx'+ num + 'B1_rough_SWIR.hdr' elif type == 'sulfuro': fileName_testing = 'sulfuros/EscSulf' + num + '_Backside_SWIR_Subset_Masked.hdr' elif check_all: if type == 'oxido': fileName_testing = 'oxidos/EscOx'+ index + 'B1_rough_SWIR.hdr' elif type == 'sulfuro': fileName_testing = 'sulfuros/EscSulf' + index + '_Backside_SWIR_Subset_Masked.hdr' try: img_testing = sp.open_image(filePath + fileName_testing) except Exception as err: print('Cannot open your testing file.\n Error info:' + str(err.args), end = '\n') exit(0) return img_testing # input SP_reference_oxido, sulfuro, check_all =1 if you want to check all files and got accuracy file. and dataProcess_alg to process your testing sp array def check(SP_ref_oxido, SP_ref_sulfuro, check_all = 0, dataProcess_alg = dataProcess_alg_pass, classifier = SAM.classifier_SAM,file_acc_name = 'acc_res.txt'): if check_all == 1: filePath = 'data/' files_list_oxi = glob(filePath + 'oxidos/'+"*.hdr") files_list_sul = glob(filePath + 'sulfuros/'+'*.hdr') num_oxi = len(files_list_oxi) num_sul = len(files_list_sul) #accuracy dict, the index is testing files' name and values is a list [single_SP, all_SP] acc_dict_oxi = {} acc_dict_sul = {} #switch global testOxi #check all the oxidos. for i in range(num_oxi): if testOxi == 0: break index0 = files_list_oxi[i].split('Esc')[-1].split('Ox')[-1][0:2] img_testing = input_testing_data(index = index0, type = 'oxido', check_all = check_all) #core alg of check # ////////SP_ref_oxido, SP_ref_sulfuro = input_training_data(use_multi_SP_as_reference = 1) res, accurarcy = Tranversing(SP_ref_oxido, SP_ref_sulfuro, img_testing, testingType = 'oxido', dataProcess_alg = dataProcess_alg,classifier = classifier) acc_dict_oxi.setdefault(str(img_testing).split('/')[2].split('_')[0] + '_res.bmp',accurarcy) #acc_dict_oxi[str(img_testing).split('/')[2].split('_')[0] + '_res.bmp'].append(accurarcy) #showing the progress acc_key = str(img_testing).split('/')[2].split('_')[0] + '_res.bmp' print('%s %f \n' % (acc_key, acc_dict_oxi[acc_key] )) #check all the sulfuros for i in range(num_sul): index0 = files_list_sul[i].split('Esc')[-1].split('Sulf')[-1][0:2] # attention: debug err? after split('Esc'), u got a list containing only one element.... 
x[0] == x[-1] img_testing = input_testing_data(index = index0, type = 'sulfuro', check_all = check_all) #core alg of check # /////SP_ref_oxido, SP_ref_sulfuro = input_training_data(use_multi_SP_as_reference = 0) res, accurarcy = Tranversing(SP_ref_oxido, SP_ref_sulfuro, img_testing, testingType = 'sulfuro', dataProcess_alg = dataProcess_alg,classifier = classifier) acc_dict_sul.setdefault(str(img_testing).split('/')[2].split('_')[0] + '_res.bmp',accurarcy) #acc_dict_sul[str(img_testing).split('/')[2].split('_')[0] + '_res.bmp'].append(accurarcy) #showing the progress acc_key = str(img_testing).split('/')[2].split('_')[0] + '_res.bmp' print('%s %f \n' % (acc_key, acc_dict_sul[acc_key] )) #write the results into txt file_res = open(filePath +file_acc_name, 'w') file_res.write('fileName \t \t Accuracy\n') acc_dict_oxi = sorted(acc_dict_oxi.items(), key = lambda d: d[0]) acc_dict_sul = sorted(acc_dict_sul.items(), key = lambda d: d[0]) for i in range(len(acc_dict_oxi)): file_res.write("%s \t %f\n" % (acc_dict_oxi[i][0],acc_dict_oxi[i][1])) for i in range(len(acc_dict_sul)): file_res.write("%s \t %f\n" % (acc_dict_sul[i][0],acc_dict_sul[i][1])) elif check_all == 0: #input testing data img_testing = input_testing_data() # tranversing the img and cal spectral angle between testImg and refImg. #Input: testing img and reference img. if 'Sulf' in str(img_testing): res, accurarcy = Tranversing(SP_ref_oxido,SP_ref_sulfuro, img_testing, testingType = 'sulfuro', dataProcess_alg = dataProcess_alg) else: res, accurarcy = Tranversing(SP_ref_oxido,SP_ref_sulfuro, img_testing, testingType = 'oxido',dataProcess_alg = dataProcess_alg) width, height, deepth = img_testing.shape resName = str(img_testing).split('/')[2].split('_')[0] + '_res.bmp' filePath = 'data/' show_res(res,accurarcy, width, height, filePath, resName, showImg = 0) # Tranverse the whole image and return its' pixel record list and checking accuracy def Tranversing(SP_reference1, SP_reference2, img_testing, testingType = 'oxido', dataProcess_alg = dataProcess_alg_pass, classifier = SAM.classifier_SAM): width, height, deepth = img_testing.shape deepth = len(SP_reference1) #res is a list that would save the classification result, 2 is background, 1 is right, 0 is wrong. res = [] # the pixel number of background count_bg = 0 count_right = 0 for i in range(width): for j in range(height): SP_testing = img_testing[i,j] # if this pixel is background, res = 2 if exclude_BG(SP_testing): res.append(2) count_bg += 1 continue # pre-algorithm process data. SP_testing = dataProcess_alg(SP_testing) #testing, debugging if normalize_button == 1: import SP_paras SP_testing, SP_reference1, SP_reference2 = SP_paras.normalize(SP_reference1,SP_reference2,SP_testing) # compute spectrum angles. class_type = classifier(SP_reference1,SP_reference2,SP_testing) # attention please: this is the red mark code, maybe u could add more barriers here. # attention please: now ref1 is oxido, ref2 is sulfuro, testing img is a oxido if testingType == 'oxido' or testingType == 1: if class_type == 1: res.append(1) count_right += 1 else: res.append(0) elif testingType == 'sulfuro'or testingType == 2: if class_type == 2: res.append(1) count_right += 1 else: res.append(0) accurarcy = count_right / (width * height - count_bg) return [res,accurarcy] #load average sulfuros data, return a spectrum array. 
def load_training_SP(type = 'sulfuro'):
    filePath = 'data/'
    fileNameList = os.listdir(filePath + type + 's/')
    fileName_aver = 'spectrum_average.txt'
    if fileName_aver in fileNameList:
        # a pre-computed average spectrum exists: parse the plain-text dump of the array
        file_temp = open(filePath + type + 's/' + fileName_aver)
        sp_average = file_temp.read()
        sp_average = sp_average.replace('[', '')
        sp_average = sp_average.replace(']', '')
        sp_average = sp_average.split()
        file_temp.close()
        return np.array(sp_average, dtype = np.float)
    else:
        # no pre-computed average spectrum: open the first image of this type
        # and average its non-background pixels instead
        hdr_list = glob(filePath + type + 's/' + '*.hdr')
        img_temp = sp.open_image(hdr_list[0])
        return cal_avg_SP(img_temp)

# other choices to input a training spectrum: load a single image by type and index
def load_image(type = 'oxido', index = '01', filePath = None):
    if filePath == None:
        if type == 'oxido':
            filePath = 'data/oxidos/'
            fileName = 'EscOx' + index + 'B1_rough_SWIR.hdr'
        elif type == 'sulfuro':
            filePath = 'data/sulfuros/'
            fileName = 'EscSulf' + index + '_Backside_SWIR_Subset_Masked.hdr'
        image = sp.open_image(filePath + fileName)
    else:
        image = sp.open_image(filePath)
    return image
{ "content_hash": "d381563604b9c04b60f59be78e2f45d6", "timestamp": "", "source": "github", "line_count": 226, "max_line_length": 181, "avg_line_length": 42.52654867256637, "alnum_prop": 0.580168556861929, "repo_name": "Vincentyao1995/Globalink2017-UBC", "id": "e41c1785dfce9f21acf5156999428bc06d65bb31", "size": "9613", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Vincent/REE_Patterns/test_algorithm.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "10099" }, { "name": "Python", "bytes": "329767" } ], "symlink_target": "" }
from collections import defaultdict from collections import OrderedDict import copy import functools import logging import types from django.conf import settings from django import shortcuts from django.template.loader import render_to_string from django import urls from django.utils.functional import Promise from django.utils.http import urlencode from django.utils.translation import ugettext_lazy as _ import six from horizon import exceptions from horizon import messages from horizon.utils import functions from horizon.utils import html from horizon.utils import settings as utils_settings LOG = logging.getLogger(__name__) # For Bootstrap integration; can be overridden in settings. ACTION_CSS_CLASSES = () STRING_SEPARATOR = "__" class BaseActionMetaClass(type): """Metaclass for adding all actions options from inheritance tree to action. This way actions can inherit from each other but still use the class attributes DSL. Meaning, all attributes of Actions are defined as class attributes, but in the background, it will be used as parameters for the initializer of the object. The object is then initialized clean way. Similar principle is used in DataTableMetaclass. """ def __new__(mcs, name, bases, attrs): # Options of action are set as class attributes, loading them. options = {} if attrs: options = attrs # Iterate in reverse to preserve final order for base in bases[::-1]: # It actually throws all super classes away except immediate # superclass. But it's fine, immediate super-class base_options # includes everything because superclasses was created also by # this metaclass. Same principle is used in DataTableMetaclass. if hasattr(base, 'base_options') and base.base_options: base_options = {} # Updating options by superclasses. base_options.update(base.base_options) # Updating superclass options by actual class options. base_options.update(options) options = base_options # Saving all options to class attribute, this will be used for # instantiating of the specific Action. attrs['base_options'] = options return type.__new__(mcs, name, bases, attrs) def __call__(cls, *args, **kwargs): cls.base_options.update(kwargs) # Adding cls.base_options to each init call. klass = super(BaseActionMetaClass, cls).__call__( *args, **cls.base_options) return klass @six.add_metaclass(BaseActionMetaClass) class BaseAction(html.HTMLElement): """Common base class for all ``Action`` classes.""" def __init__(self, **kwargs): super(BaseAction, self).__init__() self.datum = kwargs.get('datum', None) self.table = kwargs.get('table', None) self.handles_multiple = kwargs.get('handles_multiple', False) self.requires_input = kwargs.get('requires_input', False) self.preempt = kwargs.get('preempt', False) self.policy_rules = kwargs.get('policy_rules', None) self.action_type = kwargs.get('action_type', 'default') def data_type_matched(self, datum): """Method to see if the action is allowed for a certain type of data. Only affects mixed data type tables. """ if datum: action_data_types = getattr(self, "allowed_data_types", []) # If the data types of this action is empty, we assume it accepts # all kinds of data and this method will return True. if action_data_types: datum_type = getattr(datum, self.table._meta.data_type_name, None) if datum_type and (datum_type not in action_data_types): return False return True def get_policy_target(self, request, datum): """Provide the target for a policy request. This method is meant to be overridden to return target details when one of the policy checks requires them. 
E.g., {"user_id": datum.id} """ return {} def allowed(self, request, datum): """Determine whether this action is allowed for the current request. This method is meant to be overridden with more specific checks. """ return True def _allowed(self, request, datum): policy_check = utils_settings.import_setting("POLICY_CHECK_FUNCTION") if policy_check and self.policy_rules: target = self.get_policy_target(request, datum) return (policy_check(self.policy_rules, request, target) and self.allowed(request, datum)) return self.allowed(request, datum) def update(self, request, datum): """Allows per-action customization based on current conditions. This is particularly useful when you wish to create a "toggle" action that will be rendered differently based on the value of an attribute on the current row's data. By default this method is a no-op. """ pass def get_default_classes(self): """Returns a list of the default classes for the action. Defaults to ``["btn", "btn-default", "btn-sm"]``. """ return getattr(settings, "ACTION_CSS_CLASSES", ACTION_CSS_CLASSES) def get_default_attrs(self): """Returns a list of the default HTML attributes for the action. Defaults to returning an ``id`` attribute with the value ``{{ table.name }}__action_{{ action.name }}__{{ creation counter }}``. """ if self.datum is not None: bits = (self.table.name, "row_%s" % self.table.get_object_id(self.datum), "action_%s" % self.name) else: bits = (self.table.name, "action_%s" % self.name) return {"id": STRING_SEPARATOR.join(bits)} def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.name) def associate_with_table(self, table): self.table = table class Action(BaseAction): """Represents an action which can be taken on this table's data. .. attribute:: name Required. The short name or "slug" representing this action. This name should not be changed at runtime. .. attribute:: verbose_name A descriptive name used for display purposes. Defaults to the value of ``name`` with the first letter of each word capitalized. .. attribute:: verbose_name_plural Used like ``verbose_name`` in cases where ``handles_multiple`` is ``True``. Defaults to ``verbose_name`` with the letter "s" appended. .. attribute:: method The HTTP method for this action. Defaults to ``POST``. Other methods may or may not succeed currently. .. attribute:: requires_input Boolean value indicating whether or not this action can be taken without any additional input (e.g. an object id). Defaults to ``True``. .. attribute:: preempt Boolean value indicating whether this action should be evaluated in the period after the table is instantiated but before the data has been loaded. This can allow actions which don't need access to the full table data to bypass any API calls and processing which would otherwise be required to load the table. .. attribute:: allowed_data_types A list that contains the allowed data types of the action. If the datum's type is in this list, the action will be shown on the row for the datum. Default to be an empty list (``[]``). When set to empty, the action will accept any kind of data. .. attribute:: policy_rules list of scope and rule tuples to do policy checks on, the composition of which is (scope, rule) * scope: service type managing the policy for action * rule: string representing the action to be checked .. 
code-block:: python for a policy that requires a single rule check: policy_rules should look like "(("compute", "compute:create_instance"),)" for a policy that requires multiple rule checks: rules should look like "(("identity", "identity:list_users"), ("identity", "identity:list_roles"))" At least one of the following methods must be defined: .. method:: single(self, data_table, request, object_id) Handler for a single-object action. .. method:: multiple(self, data_table, request, object_ids) Handler for multi-object actions. .. method:: handle(self, data_table, request, object_ids) If a single function can work for both single-object and multi-object cases then simply providing a ``handle`` function will internally route both ``single`` and ``multiple`` requests to ``handle`` with the calls from ``single`` being transformed into a list containing only the single object id. """ def __init__(self, single_func=None, multiple_func=None, handle_func=None, attrs=None, **kwargs): super(Action, self).__init__(**kwargs) self.method = kwargs.get('method', "POST") self.requires_input = kwargs.get('requires_input', True) self.verbose_name = kwargs.get('verbose_name', self.name.title()) self.verbose_name_plural = kwargs.get('verbose_name_plural', "%ss" % self.verbose_name) self.allowed_data_types = kwargs.get('allowed_data_types', []) self.icon = kwargs.get('icon', None) if attrs: self.attrs.update(attrs) # Don't set these if they're None if single_func: self.single = single_func if multiple_func: self.multiple = multiple_func if handle_func: self.handle = handle_func # Ensure we have the appropriate methods has_handler = hasattr(self, 'handle') and callable(self.handle) has_single = hasattr(self, 'single') and callable(self.single) has_multiple = hasattr(self, 'multiple') and callable(self.multiple) if has_handler or has_multiple: self.handles_multiple = True if not has_handler and (not has_single or has_multiple): cls_name = self.__class__.__name__ raise NotImplementedError('You must define either a "handle" ' 'method or a "single" or "multiple" ' 'method on %s.' % cls_name) if not has_single: def single(self, data_table, request, object_id): return self.handle(data_table, request, [object_id]) self.single = types.MethodType(single, self) if not has_multiple and self.handles_multiple: def multiple(self, data_table, request, object_ids): return self.handle(data_table, request, object_ids) self.multiple = types.MethodType(multiple, self) def get_param_name(self): """Returns the full POST parameter name for this action. Defaults to ``{{ table.name }}__{{ action.name }}``. """ return "__".join([self.table.name, self.name]) class LinkAction(BaseAction): """A table action which is simply a link rather than a form POST. .. attribute:: name Required. The short name or "slug" representing this action. This name should not be changed at runtime. .. attribute:: verbose_name A string which will be rendered as the link text. (Required) .. attribute:: url A string or a callable which resolves to a url to be used as the link target. You must either define the ``url`` attribute or override the ``get_link_url`` method on the class. .. attribute:: allowed_data_types A list that contains the allowed data types of the action. If the datum's type is in this list, the action will be shown on the row for the datum. Defaults to be an empty list (``[]``). When set to empty, the action will accept any kind of data. 
""" # class attribute name is used for ordering of Actions in table name = "link" ajax = False def __init__(self, attrs=None, **kwargs): super(LinkAction, self).__init__(**kwargs) self.method = kwargs.get('method', "GET") self.bound_url = kwargs.get('bound_url', None) self.name = kwargs.get('name', self.name) self.verbose_name = kwargs.get('verbose_name', self.name.title()) self.url = kwargs.get('url', None) self.allowed_data_types = kwargs.get('allowed_data_types', []) self.icon = kwargs.get('icon', None) self.kwargs = kwargs self.action_type = kwargs.get('action_type', 'default') if not kwargs.get('verbose_name', None): raise NotImplementedError('A LinkAction object must have a ' 'verbose_name attribute.') if attrs: self.attrs.update(attrs) if self.ajax: self.classes = list(self.classes) + ['ajax-update'] def get_ajax_update_url(self): table_url = self.table.get_absolute_url() params = urlencode( OrderedDict([("action", self.name), ("table", self.table.name)]) ) return "%s?%s" % (table_url, params) def render(self, **kwargs): action_dict = copy.copy(kwargs) action_dict.update({"action": self, "is_single": True}) return render_to_string("horizon/common/_data_table_action.html", action_dict) def associate_with_table(self, table): super(LinkAction, self).associate_with_table(table) if self.ajax: self.attrs['data-update-url'] = self.get_ajax_update_url() def get_link_url(self, datum=None): """Returns the final URL based on the value of ``url``. If ``url`` is callable it will call the function. If not, it will then try to call ``reverse`` on ``url``. Failing that, it will simply return the value of ``url`` as-is. When called for a row action, the current row data object will be passed as the first parameter. """ if not self.url: raise NotImplementedError('A LinkAction class must have a ' 'url attribute or define its own ' 'get_link_url method.') if callable(self.url): return self.url(datum, **self.kwargs) try: if datum: obj_id = self.table.get_object_id(datum) return urls.reverse(self.url, args=(obj_id,)) else: return urls.reverse(self.url) except urls.NoReverseMatch as ex: LOG.info('No reverse found for "%(url)s": %(exception)s', {'url': self.url, 'exception': ex}) return self.url class FilterAction(BaseAction): """A base class representing a filter action for a table. .. attribute:: name The short name or "slug" representing this action. Defaults to ``"filter"``. .. attribute:: verbose_name A descriptive name used for display purposes. Defaults to the value of ``name`` with the first letter of each word capitalized. .. attribute:: param_name A string representing the name of the request parameter used for the search term. Default: ``"q"``. .. attribute:: filter_type A string representing the type of this filter. If this is set to ``"server"`` then ``filter_choices`` must also be provided. Default: ``"query"``. .. attribute:: filter_choices Required for server type filters. A tuple of tuples representing the filter options. Tuple composition should evaluate to (string, string, boolean, string, boolean), representing the following: * The first value is the filter parameter. * The second value represents display value. * The third optional value indicates whether or not it should be applied to the API request as an API query attribute. API type filters do not need to be accounted for in the filter method since the API will do the filtering. However, server type filters in general will need to be performed in the filter method. By default this attribute is not provided (``False``). 
* The fourth optional value is used as help text if provided. The default is ``None`` which means no help text. * The fifth optional value determines whether or not the choice is displayed to users. It defaults to ``True``. This is useful when the choice needs to be displayed conditionally. .. attribute:: needs_preloading If True, the filter function will be called for the initial GET request with an empty ``filter_string``, regardless of the value of ``method``. """ # TODO(gabriel): The method for a filter action should be a GET, # but given the form structure of the table that's currently impossible. # At some future date this needs to be reworked to get the filter action # separated from the table's POST form. # class attribute name is used for ordering of Actions in table name = "filter" def __init__(self, **kwargs): super(FilterAction, self).__init__(**kwargs) self.method = kwargs.get('method', "POST") self.name = kwargs.get('name', self.name) self.verbose_name = kwargs.get('verbose_name', _("Filter")) self.filter_type = kwargs.get('filter_type', "query") self.filter_choices = kwargs.get('filter_choices') self.needs_preloading = kwargs.get('needs_preloading', False) self.param_name = kwargs.get('param_name', 'q') self.icon = "search" if self.filter_type == 'server' and self.filter_choices is None: raise NotImplementedError( 'A FilterAction object with the ' 'filter_type attribute set to "server" must also have a ' 'filter_choices attribute.') def get_param_name(self): """Returns the full query parameter name for this action. Defaults to ``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``. """ return "__".join([self.table.name, self.name, self.param_name]) def assign_type_string(self, table, data, type_string): for datum in data: setattr(datum, table._meta.data_type_name, type_string) def data_type_filter(self, table, data, filter_string): filtered_data = [] for data_type in table._meta.data_types: func_name = "filter_%s_data" % data_type filter_func = getattr(self, func_name, None) if not filter_func and not callable(filter_func): # The check of filter function implementation should happen # in the __init__. However, the current workflow of DataTable # and actions won't allow it. Need to be fixed in the future. cls_name = self.__class__.__name__ raise NotImplementedError( "You must define a %(func_name)s method for %(data_type)s" " data type in %(cls_name)s." % {'func_name': func_name, 'data_type': data_type, 'cls_name': cls_name}) _data = filter_func(table, data, filter_string) self.assign_type_string(table, _data, data_type) filtered_data.extend(_data) return filtered_data def filter(self, table, data, filter_string): """Provides the actual filtering logic. This method must be overridden by subclasses and return the filtered data. """ return data def is_api_filter(self, filter_field): """Determine if agiven filter field should be used as an API filter.""" if self.filter_type == 'server': for choice in self.filter_choices: if (choice[0] == filter_field and len(choice) > 2 and choice[2]): return True return False def get_select_options(self): """Provide the value, string, and help_text for the template to render. help_text is returned if applicable. 
""" if self.filter_choices: return [choice[:4] for choice in self.filter_choices # Display it If the fifth element is True or does not exist if len(choice) < 5 or choice[4]] class NameFilterAction(FilterAction): """A filter action for name property.""" def filter(self, table, items, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [item for item in items if query in item.name.lower()] class FixedFilterAction(FilterAction): """A filter action with fixed buttons.""" def __init__(self, **kwargs): super(FixedFilterAction, self).__init__(**kwargs) self.filter_type = kwargs.get('filter_type', "fixed") self.needs_preloading = kwargs.get('needs_preloading', True) self.fixed_buttons = self.get_fixed_buttons() self.filter_string = '' def filter(self, table, images, filter_string): self.filter_string = filter_string categories = self.categorize(table, images) self.categories = defaultdict(list, categories) for button in self.fixed_buttons: button['count'] = len(self.categories[button['value']]) if not filter_string: return images return self.categories[filter_string] def get_fixed_buttons(self): """Returns a list of dict describing fixed buttons used for filtering. Each list item should be a dict with the following keys: * ``text``: Text to display on the button * ``icon``: Icon class for icon element (inserted before text). * ``value``: Value returned when the button is clicked. This value is passed to ``filter()`` as ``filter_string``. """ return [] def categorize(self, table, rows): """Override to separate rows into categories. To have filtering working properly on the client, each row will need CSS class(es) beginning with 'category-', followed by the value of the fixed button. Return a dict with a key for the value of each fixed button, and a value that is a list of rows in that category. """ return {} class BatchAction(Action): """A table action which takes batch action on one or more objects. This action should not require user input on a per-object basis. .. attribute:: name A short name or "slug" representing this action. Should be one word such as "delete", "add", "disable", etc. .. method:: action_present Method returning a present action name. This is used as an action label. Method must accept an integer/long parameter and return the display forms of the name properly pluralised (depending on the integer) and translated in a string or tuple/list. The returned display form is highly recommended to be a complete action name with a form of a transitive verb and an object noun. Each word is capitalized and the string should be marked as translatable. If tuple or list - then setting self.current_present_action = n will set the current active item from the list(action_present[n]) .. method:: action_past Method returning a past action name. This is usually used to display a message when the action is completed. Method must accept an integer/long parameter and return the display forms of the name properly pluralised (depending on the integer) and translated in a string or tuple/list. The detail is same as that of ``action_present``. .. attribute:: success_url Optional location to redirect after completion of the delete action. Defaults to the current page. .. attribute:: help_text Optional message for providing an appropriate help text for the horizon user. 
""" help_text = _("This action cannot be undone.") def __init__(self, **kwargs): super(BatchAction, self).__init__(**kwargs) action_present_method = callable(getattr(self, 'action_present', None)) action_past_method = callable(getattr(self, 'action_past', None)) if not action_present_method or not action_past_method: raise NotImplementedError( 'The %s BatchAction class must have both action_past and ' 'action_present methods.' % self.__class__.__name__ ) self.success_url = kwargs.get('success_url', None) # If setting a default name, don't initialize it too early self.verbose_name = kwargs.get('verbose_name', self._get_action_name) self.verbose_name_plural = kwargs.get( 'verbose_name_plural', lambda: self._get_action_name('plural')) self.current_present_action = 0 self.current_past_action = 0 # Keep record of successfully handled objects self.success_ids = [] self.help_text = kwargs.get('help_text', self.help_text) def _allowed(self, request, datum=None): # Override the default internal action method to prevent batch # actions from appearing on tables with no data. if not self.table.data and not datum: return False return super(BatchAction, self)._allowed(request, datum) def _get_action_name(self, items=None, past=False): """Retreive action name based on the number of items and `past` flag. :param items: A list or tuple of items (or container with a __len__ method) to count the number of concerned items for which this method is called. When this method is called for a single item (by the BatchAction itself), this parameter can be omitted and the number of items will be considered as "one". If we want to evaluate to "zero" this parameter must not be omitted (and should be an empty container). :param past: Boolean flag indicating if the action took place in the past. By default a present action is considered. """ action_type = "past" if past else "present" if items is None: # Called without items parameter (by a single instance.) count = 1 else: count = len(items) action_attr = getattr(self, "action_%s" % action_type)(count) if isinstance(action_attr, (six.string_types, Promise)): action = action_attr else: toggle_selection = getattr(self, "current_%s_action" % action_type) action = action_attr[toggle_selection] return action def action(self, request, datum_id): """Accepts a single object id and performs the specific action. This method is required. Return values are discarded, errors raised are caught and logged. 
""" def update(self, request, datum): """Switches the action verbose name, if needed.""" if getattr(self, 'action_present', False): self.verbose_name = self._get_action_name() self.verbose_name_plural = self._get_action_name('plural') def get_success_url(self, request=None): """Returns the URL to redirect to after a successful action.""" if self.success_url: return self.success_url return request.get_full_path() def get_default_attrs(self): """Returns a list of the default HTML attributes for the action.""" attrs = super(BatchAction, self).get_default_attrs() attrs.update({'data-batch-action': 'true'}) return attrs def handle(self, table, request, obj_ids): action_success = [] action_failure = [] action_not_allowed = [] for datum_id in obj_ids: datum = table.get_object_by_id(datum_id) datum_display = table.get_object_display(datum) or datum_id if not table._filter_action(self, request, datum): action_not_allowed.append(datum_display) LOG.warning(u'Permission denied to %(name)s: "%(dis)s"', { 'name': self._get_action_name(past=True).lower(), 'dis': datum_display }) continue try: self.action(request, datum_id) # Call update to invoke changes if needed self.update(request, datum) action_success.append(datum_display) self.success_ids.append(datum_id) LOG.info(u'%(action)s: "%(datum_display)s"', {'action': self._get_action_name(past=True), 'datum_display': datum_display}) except Exception as ex: handled_exc = isinstance(ex, exceptions.HandledException) if handled_exc: # In case of HandledException, an error message should be # handled in exceptions.handle() or other logic, # so we don't need to handle the error message here. # NOTE(amotoki): To raise HandledException from the logic, # pass escalate=True and do not pass redirect argument # to exceptions.handle(). # If an exception is handled, the original exception object # is stored in ex.wrapped[1]. ex = ex.wrapped[1] else: # Handle the exception but silence it since we'll display # an aggregate error message later. Otherwise we'd get # multiple error messages displayed to the user. action_failure.append(datum_display) action_description = ( self._get_action_name(past=True).lower(), datum_display) LOG.warning( 'Action %(action)s Failed for %(reason)s', { 'action': action_description, 'reason': ex}) # Begin with success message class, downgrade to info if problems. success_message_level = messages.success if action_not_allowed: msg = _('You are not allowed to %(action)s: %(objs)s') params = {"action": self._get_action_name(action_not_allowed).lower(), "objs": functions.lazy_join(", ", action_not_allowed)} messages.error(request, msg % params) success_message_level = messages.info if action_failure: msg = _('Unable to %(action)s: %(objs)s') params = {"action": self._get_action_name(action_failure).lower(), "objs": functions.lazy_join(", ", action_failure)} messages.error(request, msg % params) success_message_level = messages.info if action_success: msg = _('%(action)s: %(objs)s') params = {"action": self._get_action_name(action_success, past=True), "objs": functions.lazy_join(", ", action_success)} success_message_level(request, msg % params) return shortcuts.redirect(self.get_success_url(request)) class DeleteAction(BatchAction): """A table action used to perform delete operations on table data. .. attribute:: name A short name or "slug" representing this action. Defaults to 'delete' .. method:: action_present Method returning a present action name. This is used as an action label. 
Method must accept an integer/long parameter and return the display forms of the name properly pluralised (depending on the integer) and translated in a string or tuple/list. The returned display form is highly recommended to be a complete action name with a form of a transitive verb and an object noun. Each word is capitalized and the string should be marked as translatable. If tuple or list - then setting self.current_present_action = n will set the current active item from the list(action_present[n]) .. method:: action_past Method returning a past action name. This is usually used to display a message when the action is completed. Method must accept an integer/long parameter and return the display forms of the name properly pluralised (depending on the integer) and translated in a string or tuple/list. The detail is same as that of ``action_present``. .. attribute:: success_url Optional location to redirect after completion of the delete action. Defaults to the current page. .. attribute:: help_text Optional message for providing an appropriate help text for the horizon user. """ name = "delete" def __init__(self, **kwargs): super(DeleteAction, self).__init__(**kwargs) self.name = kwargs.get('name', self.name) self.icon = "trash" self.action_type = "danger" def action(self, request, obj_id): """Action entry point. Overrides base class' action method. Accepts a single object id passing it over to the delete method responsible for the object's destruction. """ return self.delete(request, obj_id) def delete(self, request, obj_id): """Required. Deletes an object referenced by obj_id. Override to provide delete functionality specific to your data. """ class handle_exception_with_detail_message(object): """Decorator to allow special exception handling in BatchAction.action(). An exception from BatchAction.action() or DeleteAction.delete() is normally caught by BatchAction.handle() and BatchAction.handle() displays an aggregated error message. However, there are cases where we would like to provide an error message which explains a failure reason if some exception occurs so that users can understand situation better. This decorator allows us to do this kind of special handling easily. This can be applied to BatchAction.action() and DeleteAction.delete() methods. :param normal_log_message: Log message template when an exception other than ``target_exception`` is detected. Keyword substituion "%(id)s" and "%(exc)s" can be used. :param target_exception: Exception class should be handled specially. If this exception is caught, a log message will be logged using ``target_log_message`` and a user visible will be shown using ``target_user_message``. In this case, an aggregated error message generated by BatchAction.handle() does not include an object which causes this exception. :param target_log_message: Log message template when an exception specified in ``target_exception`` is detected. Keyword substituion "%(id)s" and "%(exc)s" can be used. :param target_user_message: User visible message template when an exception specified in ``target_exception`` is detected. It is recommended to use an internationalized string. Keyword substituion "%(name)s" and "%(exc)s" can be used. :param logger_name: (optional) Logger name to be used. The usual pattern is to pass __name__ of a caller. This allows us to show a module name of a caller in a logged message. 
""" def __init__(self, normal_log_message, target_exception, target_log_message, target_user_message, logger_name=None): self.logger = logging.getLogger(logger_name or __name__) self.normal_log_message = normal_log_message self.target_exception = target_exception self.target_log_message = target_log_message self.target_user_message = target_user_message def __call__(self, fn): @functools.wraps(fn) def decorated(instance, request, obj_id): try: fn(instance, request, obj_id) except self.target_exception as e: self.logger.info(self.target_log_message, {'id': obj_id, 'exc': e}) obj = instance.table.get_object_by_id(obj_id) name = instance.table.get_object_display(obj) msg = self.target_user_message % {'name': name, 'exc': e} # 'escalate=True' is required to notify the caller # (DeleteAction) of the failure. exceptions.handle() will # raise a wrapped exception of HandledException and BatchAction # will handle it. 'redirect' should not be passed here as # 'redirect' has a priority over 'escalate' argument. exceptions.handle(request, msg, escalate=True) except Exception as e: self.logger.info(self.normal_log_message, {'id': obj_id, 'exc': e}) # NOTE: No exception handling is required here because # BatchAction.handle() does it. What we need to do is # just to re-raise the exception. raise return decorated
{ "content_hash": "2f614f78be57e4e44292e7143389d7a1", "timestamp": "", "source": "github", "line_count": 951, "max_line_length": 80, "avg_line_length": 40.09358569926393, "alnum_prop": 0.6130504340528207, "repo_name": "noironetworks/horizon", "id": "1011bdaa3be747ab486f6161b6c5066896c581b8", "size": "38734", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "horizon/tables/actions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "129247" }, { "name": "HTML", "bytes": "581169" }, { "name": "JavaScript", "bytes": "2455930" }, { "name": "Python", "bytes": "5190295" }, { "name": "Shell", "bytes": "7108" } ], "symlink_target": "" }
import re from eventlet import greenthread import powervc.common.config as cfg from powervc.common.gettextutils import _ from nova.compute import flavors from nova import exception from nova import db from oslo_log import log as logging from nova.openstack.common import loopingcall from powervc.nova.driver.compute import constants LOG = logging.getLogger(__name__) CONF = cfg.CONF def periodic_flavor_sync(ctx, driver, scg_id_list): """ Periodically update the flavors from PowerVC. A default time of 300 seconds is specified for the refresh interval. if the refresh interval is set to 0, then flavors are not refreshed. """ sync_interval = CONF.powervc.flavor_sync_interval if sync_interval is None or sync_interval == 0: return def flavors_sync(driver, scg_id_list): FlavorSync(driver, scg_id_list).synchronize_flavors(ctx) LOG.debug('Flavors synchronization completed') sync_flavors = loopingcall.FixedIntervalLoopingCall(flavors_sync, driver, scg_id_list) sync_flavors.start(interval=sync_interval, initial_delay=sync_interval) class FlavorSync(): """A class that synchorizes the flavors. The functionality provided here is called by the manager. The driver provided interfaces to the PowerVC. """ def __init__(self, driver, scg_id_list): self.driver = driver self.prefix = CONF.powervc.flavor_prefix self.scg_id_list = scg_id_list def synchronize_flavors(self, ctx): """ Get a list of all public flavors from PowerVC. If it is in configuration white list, and not in black list, insert it. if it is already in local tables, ignore it. """ LOG.info(_("Flavors synchronization starts.")) # Get all public flavors. By default, detail and public is set. pvcFlavors = self.driver.list_flavors() # Sync flavors in list for flavor in pvcFlavors: LOG.info(_("Flavor:%s") % str(flavor)) greenthread.sleep(0) # This check is added to eliminate sync of private flavors # Can be removed once PowerVC fixes to return only public flavors # by default. if not(flavor.__dict__.get(constants.IS_PUBLIC)): continue if (self._check_for_sync(flavor.name)): response = self._check_for_extraspecs(flavor) if response is not None: self._sync_flavor(ctx, flavor, response[1]) LOG.info(_("Flavors synchronization ends.")) def _sanitize(self, opts_list): """ Remove any whitespace only list values """ for opt in opts_list: if len(opt.strip()) == 0: opts_list.remove(opt) return opts_list def get_flavors_white_list(self): """ Get the flavors to sync from the powervc conf file """ return self._sanitize(CONF.powervc.flavor_white_list) def get_flavors_black_list(self): """ Get the black listed flavors from the powervc conf file """ return self._sanitize(CONF.powervc.flavor_black_list) def _check_for_sync(self, fl_name): """ Check the white/black lists to determine if sync candidate """ fl_sync = True # Get the list of flavors names to sync. 
fl_wlist = self.get_flavors_white_list() fl_blist = self.get_flavors_black_list() if (len(fl_wlist) != 0): fl_sync = self._regex_comp(fl_name, fl_wlist) if (fl_sync and (len(fl_blist) != 0)): fl_sync = not(self._regex_comp(fl_name, fl_blist)) return fl_sync def _regex_comp(self, name, flist): """ Make a regex comparison for name in the list Return a boolean True if found in the list """ if name in flist: return True for item in flist: p = re.compile(item) match = p.match(name) if (match is not None): return True return False def _sync_flavor(self, ctx, flavor, extra_specs): """ Insert the flavor with extra specs if not in local database """ flavor_in_local_db = None flavor_name = self.prefix + flavor.name try: flavor_in_local_db = db.flavor_get_by_name(ctx, flavor_name) except exception.FlavorNotFoundByName: self._insert_pvc_flavor_extraspecs(ctx, flavor, extra_specs) # Update the extra_speces of the flavor if flavor_in_local_db is not None: flavor_id = flavor_in_local_db.get('flavorid', '') if (flavor_id is not '' and extra_specs): self._update_flavor_extraspecs(ctx, flavor_id, extra_specs) def _check_for_extraspecs(self, flavor): """ Check for valid extraspecs defined and to be synced. The method returns the following values: (True, None) - flavor to be synced, and no extra specs defined. (True, extraspecs) - flavor to be synced with the extra specs defined. None - scg connectivity group defined in extraspecs is not supported, and flavor not to be synced. Checking for scg to be removed when powervc driver supports multiple scgs """ flavor_extraspecs = self.driver.get_flavor_extraspecs(flavor) if flavor_extraspecs: scg_key = constants.SCG_KEY if scg_key in flavor_extraspecs: if not self.scg_id_list: return None if not flavor_extraspecs[scg_key] in self.scg_id_list: return None return (True, flavor_extraspecs) def _insert_pvc_flavor_extraspecs(self, context, flavor, extra_specs): """ Insert the flavor and extra specs if any """ flavor_created = self._create_flavor(context, flavor) if flavor_created and extra_specs: self._update_flavor_extraspecs(context, flavor_created.get('flavorid'), extra_specs) def _update_flavor_extraspecs(self, context, flavorid, flavor_extraspecs): """ Insert the flavor extra specs """ db.flavor_extra_specs_update_or_create(context, flavorid, flavor_extraspecs) def _create_flavor(self, context, flavor): """ Create and insert the flavor """ flavor_dict = flavor.__dict__ name = self.prefix + flavor.name flavorid = self.prefix + flavor.id memory = flavor.ram vcpus = flavor.vcpus root_gb = flavor.disk ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0) u_swap = flavor_dict.get('swap', 0) rxtx_factor = flavor_dict.get('rxtx_factor', 1.0) is_public = flavor_dict.get('os-flavor-access:is_public', True) if u_swap == "": swap = 0 else: swap = int(u_swap) try: return flavors.create(name, memory, vcpus, root_gb, ephemeral_gb=ephemeral_gb, flavorid=flavorid, swap=swap, rxtx_factor=rxtx_factor, is_public=is_public) except Exception as exc: LOG.error(_("Unable to sync flavor " + str(name) + ". " + str(exc.format_message()))) return None
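# ---------------------------------------------------------------------------
# Hypothetical illustration -- not part of the original module.  It mirrors
# the white/black list decision made by FlavorSync._check_for_sync() and
# _regex_comp() without needing a PowerVC driver or loaded configuration:
# a flavor name is kept when it matches the white list (or the white list is
# empty) and does not match the black list.  For example,
#     _example_flavor_filter(['m1.small', 'm1.large'], ['m1.*'], ['.*large'])
# returns ['m1.small'].  The helper name is an assumption for this sketch.
def _example_flavor_filter(flavor_names, white_list, black_list):
    def _matches(name, patterns):
        return name in patterns or any(
            re.match(pattern, name) for pattern in patterns)

    kept = []
    for name in flavor_names:
        wanted = not white_list or _matches(name, white_list)
        if wanted and black_list:
            wanted = not _matches(name, black_list)
        if wanted:
            kept.append(name)
    return kept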
{ "content_hash": "d03914bc5fe3a1acb9ce257fb356e8ad", "timestamp": "", "source": "github", "line_count": 206, "max_line_length": 79, "avg_line_length": 38.422330097087375, "alnum_prop": 0.5648768161718256, "repo_name": "openstack/powervc-driver", "id": "3b720b3cda2410bd66a17fe450e76fd30cb40336", "size": "7949", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova-powervc/powervc/nova/driver/virt/powervc/sync/flavorsync.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "1032084" }, { "name": "Shell", "bytes": "36450" } ], "symlink_target": "" }
#!/usr/bin/env python __author__ = 'Darrien' # sys is needed to skip the python script when updating # os is needed for changing the working directory, and making folders import sys import os # Kills the script in order to return to the bash script that called it. def update_kill(): print "Killing main script..." sys.exit("Updating... ") # Creates a folder if there is none already there # Also returns the path to the final folder created def folder_creation(sub_dir, appender): # Using the subdirectory obtained in the main method, gets the full directory path filename = "/var/www/F14Courses/" + str(sub_dir) filename += "/Assignment" + appender # Checks to see if the folder already exists if not os.path.exists(filename): os.makedirs(filename) print "Folder: \'Assignment" + appender + "\' created." return filename else: print "Folder already exists. Folder not created." return filename # Creates how_many number of assignments using an outline # how_many = number of assignments wanted, a_num = assignment number def assignment_creator(how_many, a_num, script_ver): thing = int(how_many) safe_text = open("safe_text.txt", "w") safe_text.write(how_many) start = "Assignment" + a_num + "pe" # Loop to make assignments until how_many for i in range(thing): thing = i + 1 if thing < 10: ender = "0" + str(thing) else: ender = str(thing) file_name = str(start + ender + ".c") # Checking to see if file exists - will not overwrite files already there if os.path.exists(file_name): print "%s exists. New file not created." % file_name else: line = "/*****************************************************/\n" filler = "/*" + " "*51 + "*/\n" new_file = open(file_name, "w") new_file.write(line) new_file.write("/* Programmer: Darrien Bradley Glasser */\n") new_file.write(filler) new_file.write("/* Program: Assignment " + str(i + 1) + " */\n") new_file.write(filler) new_file.write("/* Approximate Completion Time: TO BE FILLED */\n") new_file.write(line) new_file.write("\n/*\nEnter description of program here.\n*/\n") new_file.write("\n#include <stdio.h>\n\n") new_file.write("int main(int argc, char*argv[]){\n") new_file.write("\n\n\treturn 0;\n}") new_file.write("\n\n/* Headers autogenerated by Darrien's Server Automation Script version: " + script_ver + " */") new_file.close() print "New file " + file_name + " created." # Modifies existing HTML files, and generates new ones in order to display all files on darrieng.raspctl.com # how_many = num of assignments, a_num = assignment number, direc = 101, 102, etc. def html_gen(how_many, a_num, direc, script_ver): # 'big_file' is the file that will contain all parts of the final HTML file # 'top' contains the headers, 'links' contain links to the assignment folders, 'bottom' contains the closing tags # VERY IMPORTANT: links must not be opened in a+, will be filled with garbage otherwise. See C: I/O bug direc = str(direc) big_file = open(direc + ".html", "w") top = open("top.html", "r") links = open("links.html", "a") bottom = open("bottom.html", "r") print "Modding: " + direc + ".html" # Appending new assignment to links.html links.write("<li><a href=\"/F14Courses/" + direc + "/Assignment" + a_num + ".html\">Assignment" + a_num + "</a></li>") links.close() links = open("links.html", "r") # Adding top, links, and bottom together to make the final file big_file.write(top.read()) big_file.write(links.read()) big_file.write(bottom.read()) big_file.close(); top.close(); links.close(); bottom.close() print "File modifications completed." 
# Move to directory with new assignments in it os.chdir("Assignment" + a_num) # Generating new HTML file in AssignmentXX folder pointing towards assignments # Printing periods signifies that the server has not frozen print "Creating Assignment" + a_num + ".html" new_assignment = open("Assignment" + a_num + ".html", "w") print ".", new_assignment.write("<!DOCTYPE html PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n") new_assignment.write("<html>\n") new_assignment.write(" <head>\n") new_assignment.write(" <title>\n") new_assignment.write(" Assignment" + a_num) new_assignment.write(" </title>\n") new_assignment.write(" </head>\n") new_assignment.write(" <body>\n") new_assignment.write(" <h1>\n") new_assignment.write(" Assignment" + a_num + "\n") new_assignment.write(" </h1>\n") new_assignment.write(" <nav>\n") new_assignment.write(" <ul>") print ".", # Adding as many links as the user asked for for i in range(int(how_many)): thing = i + 1 if thing < 10: num = "0" + str(thing) new_assignment.write("\n") new_assignment.write("<li><a href=\"/F14Courses/101/Assignment" + a_num + "pe" + num + ".c\">Assignment" + a_num + "pe" + num + "</a></li>") new_assignment.write("\n") print ".", new_assignment.write("\n </ul>\n") new_assignment.write(" <nav>\n") new_assignment.write("<p>Page autogenerated by Darrien's Server Automation Script version: " + script_ver + "</p>\n") new_assignment.write(" </body>\n") new_assignment.write("</html>\n") new_assignment.close() print "\nAssignment" + a_num + ".html created." print "HTML file generation completed." if __name__ == "__main__": # Opening print statements script_ver = "0.2.7" print "\n\n#######################################\n" print "Server Automation Script version: " + script_ver print "BETA VERSION: Script works, but may have odd quirks." print "\n#######################################\n" print "Welcome back Darrien!" # Setting directory to root for easy switching # retval will also save this directory for later os.chdir("..") retval = os.getcwd() # 'u' will kill the script, and go straight to the bash script for updating all files # 'F1' corresponds to Freshman year semester I. This pattern is expected to be carried forward while True: folder = raw_input("'u' = update or Y/Semester #\n> ") if folder not in ("u", "F1", "f1", "F2", "f2"): print "Typo. Please enter input again." else: break if folder == 'u': update_kill() print "Please enter the assignment number." a_num = raw_input("> ") if a_num < 10: appender = "0" + str(a_num) else: appender = str(a_num) print "How many assignments?" how_many = raw_input("> ") if folder in ("f1", "F1"): direc = 101 elif folder in ("f2", "F2"): direc = 102 else: f_path = "NOOOOOOOOOOOOOOO" print "This should not happen. Spontaneous failure" sys.exit("FAILURE") # Creating a folder for the new assignment, and then returning the path to it in f_path (file_path) f_path = folder_creation(direc, appender) # Change directory to inside newly created folder, then create as many files as the user asked for os.chdir(f_path) assignment_creator(how_many, appender, script_ver) # A file read/created in assignment_creator # Assures that an HTML file with more assignments is not overwritten with an HTML file with fewer assignments safe_text = open("safe_text.txt") occupied = safe_text.read() if occupied < how_many: print "Number of assignments wanted is less than number of assignments created." 
sys.exit("Skipping HTML gen script.") # Moving up one directory from AssignmentXXpeXX to 101 or 102 or 1XX os.chdir("..") # Running the HTML file generation/modification method html_gen(how_many, appender, direc, script_ver)
{ "content_hash": "9a5122cded498a1b8a508308718ce443", "timestamp": "", "source": "github", "line_count": 215, "max_line_length": 127, "avg_line_length": 39.246511627906976, "alnum_prop": 0.5854467883384689, "repo_name": "DarrienG/Server-Automation", "id": "5c6b69e85eed0a2957276c5ba9e106c2af57c9e8", "size": "8438", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "automate.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "8438" } ], "symlink_target": "" }
''' Author : Oguzhan Gencoglu Contact : [email protected] Created : 11.07.2015 Latest Version : 13.10.2015 Train a classifier for breath detection ''' from __future__ import absolute_import from __future__ import print_function from get_file_locs import get_file_locs import librosa import numpy as np from sklearn.svm import SVC from sklearn.cross_validation import train_test_split from sliding_window import sliding_window def rolling_window(a, window): shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) if __name__ == '__main__': # Load breathing samples breath_files = get_file_locs('data\\breathing', 'wav') b = np.array([]) b_dur = 0 sr_b = 22050 for i in range(len(breath_files)): print('\nReading file (breathing) number', str(i + 1)) breath_file = breath_files[i] temp_b, temp_sr_b = librosa.load(breath_file) temp_dur = temp_b.shape[0] / float(temp_sr_b) b_dur = b_dur + temp_dur print('\tFile (breathing) sampling rate :', str(temp_sr_b)) print('\tFile (breathing) duration :', "{0:.2f}".format(temp_dur), 'seconds') b = np.append(b, temp_b) # Load non-breathing samples nonbreath_files = get_file_locs('data\\non_breathing', 'wav') nb = np.array([]) nb_dur = 0 for j in range(len(nonbreath_files)): print('\nReading file (non-breathing) number', str(j + 1)) nonbreath_file = nonbreath_files[j] temp_nb, temp_sr_nb = librosa.load(nonbreath_file, duration = 8) # too much nonbreath temp_dur = temp_nb.shape[0] / float(temp_sr_nb) nb_dur = nb_dur + temp_dur print('\tFile (non-breathing) sampling rate :', str(temp_sr_nb)) print('\tFile (non-breathing) duration :', "{0:.2f}".format(temp_dur), 'seconds') nb = np.append(nb, temp_nb) print('\n\tTotal duration (breathing) :', "{0:.2f}".format(b_dur), 'seconds') print('\n\tTotal duration (non-breathing) :', "{0:.2f}".format(nb_dur), 'seconds') # windowing window_len = 1024 b_feat = sliding_window(b, window_len) nb_feat = sliding_window(nb, window_len) # zero mean scaling within each window b_feat = b_feat - np.transpose(np.tile(np.mean(b_feat, axis = 1), (b_feat.shape[1], 1))) nb_feat = nb_feat - np.transpose(np.tile(np.mean(nb_feat, axis = 1), (nb_feat.shape[1], 1))) # fft features b_feat = np.abs(np.fft.fft(sliding_window(b, window_len), axis = 1)) nb_feat = np.abs(np.fft.fft(sliding_window(nb, window_len), axis = 1)) all_feats = np.vstack((b_feat, nb_feat)) # create targets breath_targets = np.ones(b_feat.shape[0]) nonbreath_targets = np.zeros(nb_feat.shape[0]) all_targets = np.hstack((breath_targets, nonbreath_targets)) # Split data into training and test X_train, X_test, y_train, y_test = train_test_split(all_feats, all_targets, test_size=0.1) # Train SVM classifier classifier = SVC(kernel = 'linear') classifier.fit(X_train, y_train) pred = classifier.predict(X_test) print("Accuracy:", 100 * np.sum(pred == y_test)/float(pred.shape[0])) # print the details of the trained SVM coefs = classifier.coef_ print("Weigths of features (for linear case):", coefs) intercept = classifier.intercept_[0] print("Intercept term:", intercept) ''' # Save the trained weights np.savetxt('weights5.txt', coefs, delimiter='\n') np.savetxt('intercept5.txt', np.array([intercept])) '''
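# ---------------------------------------------------------------------------
# Hypothetical illustration -- not part of the original script.  It shows the
# shape of the windowing step using the rolling_window() helper defined above
# (hop size 1); the script itself uses sliding_window() from a local module,
# whose hop size is not shown here.  One second of audio at 22050 Hz with
# window_len=1024 yields 21027 overlapping frames, each mapped to a 1024-bin
# magnitude spectrum.
def _example_window_shapes():
    signal = np.zeros(22050)
    frames = rolling_window(signal, 1024)         # shape (21027, 1024)
    spectra = np.abs(np.fft.fft(frames, axis=1))  # shape (21027, 1024)
    return frames.shape, spectra.shape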
{ "content_hash": "57590542a82c47a4469d96a5701c31cc", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 96, "avg_line_length": 37.37, "alnum_prop": 0.6141289804656141, "repo_name": "ogencoglu/BreathDetection", "id": "377286d9ec33569b502794a50bd8dd935c9c11ab", "size": "3737", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "code/breath_classifier_fft.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "1551" }, { "name": "Python", "bytes": "22003" } ], "symlink_target": "" }
import pytest
import numpy as np

import murraylab_tools.echo as mt_echo


@pytest.mark.skip(reason="tests not yet implemented")
class TestEchoSourceMaterial():
    def test_implement_me(self):
        assert 0
{ "content_hash": "4f60057272ec80beb5ac34a2d8902153", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 52, "avg_line_length": 23.22222222222222, "alnum_prop": 0.7464114832535885, "repo_name": "smsaladi/murraylab_tools", "id": "c7dcd5c9fa96e9bb38cffd586bed6ebc52477605", "size": "209", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "murraylab_tools/tests/echo_tests/test_echosourcematerial.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "97955" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function from __future__ import unicode_literals import os import sys import errno import logging from tldp.ldpcollection import LDPDocumentCollection from tldp.utils import md5files, stem_and_ext from tldp.typeguesser import guess, knownextensions logger = logging.getLogger(__name__) IGNORABLE_SOURCE = ('index.sgml') def scansourcedirs(dirnames): '''return a dict() of all SourceDocuments discovered in dirnames dirnames: a list of directories containing SourceDocuments. scansourcedirs ensures it is operating on the absolute filesystem path for each of the source directories. If any of the supplied dirnames does not exist as a directory, the function will log the missing source directory names and then will raise an IOError and quit. For each document that it finds in a source directory, it creates a SourceDocument entry using the stem name as a key. The rules for identifying possible SourceDocuments go as follows. - Within any source directory, a source document can consist of a single file with an extension or a directory. - If the candidate entry is a directory, then, the stem is the full directory name, e.g. Masquerading-Simple-HOWTO - If the candidate entry is a file, the stem is the filename minus extension, e.g. Encrypted-Root-Filesystem-HOWTO Because the function accepts (and will scan) many source directories, it is possible that there will be stem name collisions. If it discovers a stem collision, SourceCollection will issue a warning and skip the duplicated stem(s). [It also tries to process the source directories and candidates in a stable order between runs.] ''' found = dict() dirs = [os.path.abspath(x) for x in dirnames] results = [os.path.exists(x) for x in dirs] if not all(results): for result, sdir in zip(results, dirs): logger.critical("Source collection dir must already exist: %s", sdir) raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), sdir) for sdir in sorted(dirs): logger.debug("Scanning for source documents in %s.", sdir) for fname in sorted(os.listdir(sdir)): candidates = list() possible = arg_issourcedoc(os.path.join(sdir, fname)) if possible: candidates.append(SourceDocument(possible)) else: logger.warning("Skipping non-document %s", fname) continue for candy in candidates: if candy.stem in found: dup = found[candy.stem].filename logger.warning("Ignoring duplicate is %s", candy.filename) logger.warning("Existing dup-entry is %s", dup) else: found[candy.stem] = candy logger.debug("Discovered %s source documents", len(found)) return found def arg_issourcedoc(filename): filename = os.path.abspath(filename) if os.path.isfile(filename): if os.path.basename(filename) in IGNORABLE_SOURCE: return None return filename elif os.path.isdir(filename): return sourcedoc_fromdir(filename) return None def sourcedoc_fromdir(name): candidates = list() if not os.path.isdir(name): return None stem = os.path.basename(name) for ext in knownextensions: possible = os.path.join(name, stem + ext) if os.path.isfile(possible): candidates.append(possible) if len(candidates) > 1: logger.warning("%s multiple document choices in dir %s, bailing....", stem, name) raise Exception("multiple document choices in " + name) elif len(candidates) == 0: return None else: doc = candidates.pop() logger.debug("%s identified main document %s.", stem, doc) return doc class SourceCollection(LDPDocumentCollection): '''a dict-like container for SourceDocument objects The key in the SourceCollection is the stem name of the document, which allows 
convenient access and guarantees non-collision. The use of the stem as a key works conveniently with the OutputCollection which uses the same strategy on OutputDirectory. ''' def __init__(self, dirnames=None): '''construct a SourceCollection delegates most responsibility to function scansourcedirs ''' if dirnames is None: return self.update(scansourcedirs(dirnames)) class SourceDocument(object): '''a class providing a container for each set of source documents ''' def __repr__(self): return '<%s:%s (%s)>' % \ (self.__class__.__name__, self.filename, self.doctype) def __init__(self, filename): '''construct a SourceDocument filename is a required parameter The filename is the main (and sometimes sole) document representing the source of the LDP HOWTO or Guide. It is the document that is passed by name to be handled by any document processing toolchains (see also tldp.doctypes). Each instantiation will raise an IOERror if the supplied filename does not exist or if the filename isn't a file (symlink is fine, directory or fifo is not). The remainder of the instantiation will set attributes that are useful later in the processing phase, for example, stem, status, enclosing directory name and file extension. There are two important attributes. First, the document type guesser will try to infer the doctype (from file extension and signature). Note that it is not a fatal error if document type cannot be guessed, but the document will not be able to be processed. Second, it is useful during the decision-making process to know if any of the source files are newer than the output files. Thus, the stat() information for every file in the source document directory (or just the single source document file) will be collected. ''' self.filename = os.path.abspath(filename) if not os.path.exists(self.filename): fn = self.filename logger.critical("Missing source document: %s", fn) raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), fn) if os.path.isdir(self.filename): self.filename = sourcedoc_fromdir(self.filename) elif os.path.isfile(self.filename): pass else: # -- we did not receive a useable document file or directory name self.filename = None if self.filename is None: fn = filename logger.critical("Source document is not a plain file: %s", fn) raise ValueError(fn + " not identifiable as a document") self.doctype = guess(self.filename) self.status = 'source' self.output = None self.working = None self.differing = set() self.dirname, self.basename = os.path.split(self.filename) self.stem, self.ext = stem_and_ext(self.basename) parentbase = os.path.basename(self.dirname) logger.debug("%s found source %s", self.stem, self.filename) if parentbase == self.stem: parentdir = os.path.dirname(self.dirname) self.md5sums = md5files(self.dirname, relative=parentdir) else: self.md5sums = md5files(self.filename, relative=self.dirname) def detail(self, widths, verbose, file=sys.stdout): '''produce a small tabular output about the document''' template = ' '.join(('{s.status:{w.status}}', '{s.doctype.__name__:{w.doctype}}', '{s.stem:{w.stem}}')) outstr = template.format(s=self, w=widths) print(outstr, file=file) if verbose: print(' doctype {}'.format(self.doctype), file=file) if self.output: print(' output dir {}'.format(self.output.dirname), file=file) print(' source file {}'.format(self.filename), file=file) for why, f in sorted(self.differing): fname = os.path.join(self.dirname, f) print(' {:>7} source {}'.format(why, fname), file=file) if self.output: for f in sorted(self.output.missing): print(' missing output 
{}'.format(f), file=file) # # -- end of file
{ "content_hash": "4fbac1ce701eb3434f2b66f4a037eab3", "timestamp": "", "source": "github", "line_count": 222, "max_line_length": 79, "avg_line_length": 39.03603603603604, "alnum_prop": 0.633856450496192, "repo_name": "tLDP/python-tldp", "id": "90846d0469c9ae0f3296ac696171ba842fcffef7", "size": "8760", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tldp/sources.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASL", "bytes": "10796" }, { "name": "CSS", "bytes": "1112" }, { "name": "Perl", "bytes": "17503" }, { "name": "Python", "bytes": "200943" }, { "name": "Shell", "bytes": "428" }, { "name": "XSLT", "bytes": "5408" } ], "symlink_target": "" }
from __future__ import absolute_import import os import re import subprocess from pex.common import is_exe from pex.tracer import TRACER from pex.typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Iterable, Iterator, Optional, Tuple import attr # vendor:skip else: from pex.third_party import attr @attr.s(frozen=True) class Pyenv(object): root = attr.ib() # type: str @classmethod def find(cls): # type: () -> Optional[Pyenv] """Finds the active pyenv installation if any.""" with TRACER.timed("Searching for pyenv root...", V=3): pyenv_root = os.environ.get("PYENV_ROOT", "") if not pyenv_root: for path_entry in os.environ.get("PATH", "").split(os.pathsep): pyenv_exe = os.path.join(path_entry, "pyenv") if is_exe(pyenv_exe): process = subprocess.Popen(args=[pyenv_exe, "root"], stdout=subprocess.PIPE) stdout, _ = process.communicate() if process.returncode == 0: pyenv_root = str(stdout).strip() break if pyenv_root: pyenv = cls(pyenv_root) TRACER.log("A pyenv installation was found: {}".format(pyenv), V=6) return pyenv TRACER.log("No pyenv installation was found.", V=6) return None @attr.s(frozen=True) class Shim(object): pyenv = attr.ib() # type: Pyenv path = attr.ib() # type: str name = attr.ib() # type: str major = attr.ib() # type: Optional[str] minor = attr.ib() # type: Optional[str] _SHIM_REGEX = re.compile( r""" ^ (?P<name> python | pypy ) (?: # Major version (?P<major>[2-9]) (?: \. # Minor version (?P<minor>[0-9]) # Some pyenv pythons include a suffix on the interpreter name, similar to # PEP-3149. For example, python3.6m to indicate it was built with pymalloc. [a-z]? )? )? $ """, flags=re.VERBOSE, ) @classmethod def parse(cls, pyenv, binary): # type: (Pyenv, str) -> Optional[Pyenv.Shim] """Parses shim information from a python binary path if it looks like a pyenv shim.""" if os.path.dirname(binary) != os.path.join(pyenv.root, "shims"): return None match = cls._SHIM_REGEX.match(os.path.basename(binary)) if match is None: return None return cls( pyenv=pyenv, path=binary, name=match.group("name"), major=match.group("major"), minor=match.group("minor"), ) _PYENV_CPYTHON_VERSION_LEADING_CHARS = frozenset(str(digit) for digit in range(2, 10)) def select_version(self, search_dir=None): # type: (Optional[str]) -> Optional[str] """Reports the active shim version for the given directory or $PWD. If the shim is not activated, returns `None`. 
""" with TRACER.timed("Calculating active version for {}...".format(self), V=6): active_versions = self.pyenv.active_versions(search_dir=search_dir) if active_versions: binary_name = os.path.basename(self.path) if self.name == "python" and not self.major and not self.minor: for pyenv_version in active_versions: if pyenv_version[0] in self._PYENV_CPYTHON_VERSION_LEADING_CHARS: TRACER.log( "{} has active version {}".format(self, pyenv_version), V=6 ) return self.pyenv.python(pyenv_version, binary_name=binary_name) prefix = "{name}{major}{minor}".format( name="" if self.name == "python" else self.name, major=self.major or "", minor=".{}".format(self.minor) if self.minor else "", ) for pyenv_version in active_versions: if pyenv_version.startswith(prefix): TRACER.log("{} has active version {}".format(self, pyenv_version), V=6) return self.pyenv.python(pyenv_version, binary_name=binary_name) TRACER.log("{} is not activated.".format(self), V=6) return None def as_shim(self, binary): # type: (str) -> Optional[Shim] """View the given binary path as a pyenv shim script if it is one.""" return self.Shim.parse(self, binary) @staticmethod def _read_pyenv_versions(version_file): # type: (str) -> Iterator[str] with open(version_file) as fp: for line in fp: for version in line.strip().split(): yield version @staticmethod def _find_local_version_file(search_dir): # type: (str) -> Optional[str] while True: local_version_file = os.path.join(search_dir, ".python-version") if os.path.exists(local_version_file): return local_version_file parent_dir = os.path.dirname(search_dir) if parent_dir == search_dir: return None search_dir = parent_dir def active_versions(self, search_dir=None): # type: (Optional[str]) -> Tuple[str, ...] """Reports the active pyenv versions for the given starting search directory or $PWD.""" source_and_versions = None # type: Optional[Tuple[str, Iterable[str]]] # See: https://github.com/pyenv/pyenv#choosing-the-python-version with TRACER.timed("Finding {} active versions...".format(self), V=6): shell_version = os.environ.get("PYENV_VERSION") if shell_version: source_and_versions = ( "PYENV_VERSION={}".format(shell_version), shell_version.split(":"), ) else: cwd = search_dir if search_dir is not None else os.getcwd() TRACER.log("Looking for pyenv version files starting from {}.".format(cwd), V=6) local_version = self._find_local_version_file(search_dir=cwd) if local_version: source_and_versions = (local_version, self._read_pyenv_versions(local_version)) else: global_version = os.path.join(self.root, "version") if os.path.exists(global_version): source_and_versions = ( global_version, self._read_pyenv_versions(global_version), ) if source_and_versions: source, versions = source_and_versions TRACER.log("Found active versions in {}: {}".format(source, versions), V=6) return tuple(versions) TRACER.log("Found no active pyenv versions.", V=6) return () def python( self, pyenv_version, # type: str binary_name=None, # type: Optional[str] ): # type: (...) -> Optional[str] """Return the path of the python binary for the given pyenv version. Returns `None` if the given pyenv version is not installed. """ # N.B.: Pyenv creates a 'python' symlink for both the CPython and PyPy versions it installs; # so counting on 'python' is OK here. We do resolve the symlink though to return a canonical # direct path to the python binary. binary_name = binary_name or "python" python = os.path.realpath( os.path.join(self.root, "versions", pyenv_version, "bin", binary_name) ) return python if is_exe(python) else None
{ "content_hash": "97ebb70d491d707b94a36054db30bda6", "timestamp": "", "source": "github", "line_count": 209, "max_line_length": 100, "avg_line_length": 39.71291866028708, "alnum_prop": 0.5178313253012048, "repo_name": "pantsbuild/pex", "id": "0a99a1e2cab02ea7fe5242e95c9a2118fbce5798", "size": "8432", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "pex/pyenv.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1379" }, { "name": "Python", "bytes": "2190044" }, { "name": "Shell", "bytes": "1472" } ], "symlink_target": "" }
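A minimal usage sketch for the pex.pyenv module above, assuming a pyenv installation is discoverable via PYENV_ROOT or PATH; the shim path passed to as_shim below is purely illustrative.

from pex.pyenv import Pyenv

pyenv = Pyenv.find()
if pyenv is None:
    print("no pyenv installation detected")
else:
    # Versions pyenv would activate for the current working directory.
    for version in pyenv.active_versions():
        print("active: {}".format(version))

    # Hypothetical shim path; real shims live under <PYENV_ROOT>/shims.
    shim = pyenv.as_shim("/home/user/.pyenv/shims/python3.9")
    if shim is not None:
        # Resolves to a concrete interpreter path, or None if the shim
        # is not activated for this directory.
        print("resolved: {}".format(shim.select_version()))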
from ij import IJ
from ij.gui import Roi, ShapeRoi
import os
import os.path as op
import glob
import string
import shutil as sh
import fnmatch as fn
import errno

print ["%s\n"%i for i in dir(IJ)]

IJ.run("Close All");

"""
Power=getTag("DAC2_561-Volts");
Gain=getTag("Hamamatsu_DCAM-EMGain");
AcqTime=getTag("Exposure-ms");

def readMeta(tag)
    {
    info = getImageInfo();
    index1 = indexOf(info, tag);
    if (index1==-1)
        return "";
    index1 = indexOf(info, ":", index1);
    if (index1==-1)
        return "";
    index2 = indexOf(info, "\n", index1);
    value = substring(info, index1+1, index2);
    return value;
    }
"""

dr = IJ.getDirectory("Choose a Directory ");
for root, dirs, files in os.walk(dr):
    try:
        for f in files:
            if fn.fnmatch(f, '*meta*'):
                with open(op.join(root,f), "r") as inpt:
                    for i in inpt:
                        print i
    except (IOError, OSError):
        # A bare try block is a syntax error; skip metadata files
        # that cannot be opened or read.
        pass
{ "content_hash": "464ac98ec09a5790460fefc81914c2f6", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 50, "avg_line_length": 22.048780487804876, "alnum_prop": 0.5929203539823009, "repo_name": "moosekaka/macros-for-imageJ", "id": "48a10e6e71ee807e203934aa0cd33c3caa042f1b", "size": "950", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "deprecated/FP stack pytho.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "950" } ], "symlink_target": "" }
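The block comment in the script above carries an ImageJ-macro readMeta helper. A rough Jython equivalent is sketched below; it assumes the metadata of interest ends up in the active image's Info property (ImagePlus.getInfoProperty()), which is an assumption rather than something the macro guarantees.

from ij import IJ

def read_meta(tag):
    # Scan the active image's Info text for a "tag: value" line and
    # return the value portion, mirroring the macro in the docstring above.
    imp = IJ.getImage()
    info = imp.getInfoProperty()
    if not info:
        return ""
    for line in info.splitlines():
        if line.startswith(tag) and ":" in line:
            return line.split(":", 1)[1].strip()
    return ""

# Tag names taken from the commented-out macro above.
print read_meta("Exposure-ms")
print read_meta("Hamamatsu_DCAM-EMGain")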
import six
import unittest

import bmemcached
from bmemcached.exceptions import MemcachedException

if six.PY3:
    from unittest import mock
else:
    import mock


class TestMemcachedErrors(unittest.TestCase):
    def testGet(self):
        """
        Raise MemcachedException if the request wasn't successful and wasn't
        a 'key not found' error.
        """
        client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
        with mock.patch.object(bmemcached.client.Protocol, '_get_response') as mocked_response:
            mocked_response.return_value = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 0)
            self.assertRaises(MemcachedException, client.get, 'foo')

    def testSet(self):
        """
        Raise MemcachedException if the request wasn't successful and wasn't
        a 'key not found' or 'key exists' error.
        """
        client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
        with mock.patch.object(bmemcached.client.Protocol, '_get_response') as mocked_response:
            mocked_response.return_value = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 0)
            self.assertRaises(MemcachedException, client.set, 'foo', 'bar', 300)

    def testIncrDecr(self):
        """
        Incr/Decr raise MemcachedException if the request wasn't successful.
        """
        client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
        client.set('foo', 1)
        with mock.patch.object(bmemcached.client.Protocol, '_get_response') as mocked_response:
            mocked_response.return_value = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 2)
            self.assertRaises(MemcachedException, client.incr, 'foo', 1)
            self.assertRaises(MemcachedException, client.decr, 'foo', 1)

    def testDelete(self):
        """
        Raise MemcachedException if the delete request isn't successful.
        """
        client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
        client.flush_all()
        with mock.patch.object(bmemcached.client.Protocol, '_get_response') as mocked_response:
            mocked_response.return_value = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 0)
            self.assertRaises(MemcachedException, client.delete, 'foo')

    def testFlushAll(self):
        """
        Raise MemcachedException if the flush wasn't successful.
        """
        client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
        with mock.patch.object(bmemcached.client.Protocol, '_get_response') as mocked_response:
            mocked_response.return_value = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 0)
            self.assertRaises(MemcachedException, client.flush_all)
{ "content_hash": "7dd512353e448974e7e7681ef2e947f7", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 95, "avg_line_length": 42.66129032258065, "alnum_prop": 0.6264650283553875, "repo_name": "xmonster-tech/python-binary-memcached", "id": "fedd9dabd5d2480793668eaf317b7c0d5fa36683", "size": "2645", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_errors.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "71481" }, { "name": "Shell", "bytes": "104" } ], "symlink_target": "" }
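Every test above follows the same recipe: patch Protocol._get_response so the client sees a failure response, then assert that MemcachedException bubbles up. A condensed sketch of that recipe as a reusable helper; the helper name is hypothetical and the local memcached at 127.0.0.1:11211 is the same assumption the tests make.

try:
    from unittest import mock
except ImportError:
    import mock

import bmemcached
from bmemcached.exceptions import MemcachedException

FAKE_FAILURE = (0, 0, 0, 0, 0, 0x81, 0, 0, 0, 0)  # same fake response tuple the tests use

def assert_fails(client, method_name, *args):
    # Hypothetical helper: run one client call against a mocked failure
    # response and check that MemcachedException is raised.
    with mock.patch.object(bmemcached.client.Protocol, '_get_response') as fake:
        fake.return_value = FAKE_FAILURE
        try:
            getattr(client, method_name)(*args)
        except MemcachedException:
            return
    raise AssertionError('%s did not raise MemcachedException' % method_name)

client = bmemcached.Client('127.0.0.1:11211', 'user', 'password')
assert_fails(client, 'get', 'foo')
assert_fails(client, 'delete', 'foo')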
""" Copyright 2012 Ali Ok (aliokATapacheDOTorg) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from trnltk.morphology.contextful.likelihoodmetrics.contextlessdistribution.contextlessdistributionsmoother import SimpleGoodTuringContextlessDistributionSmoother, CachedContextlessDistributionSmoother """ There is no verification -yet- in test of this class. The tests are there for making sure there is no run time exceptions """ import logging import os import pprint import unittest import pymongo from trnltk.morphology.contextful.likelihoodmetrics.contextlessdistribution.contextlessdistributioncalculator import ContextlessDistributionCalculator from trnltk.morphology.contextful.likelihoodmetrics.hidden.database import DatabaseIndexBuilder from trnltk.morphology.contextful.likelihoodmetrics.hidden.targetformgivencontextcounter import TargetFormGivenContextCounter from trnltk.morphology.contextless.parser.parser import ContextlessMorphologicalParser from trnltk.morphology.contextless.parser.rootfinder import WordRootFinder, DigitNumeralRootFinder, ProperNounFromApostropheRootFinder, ProperNounWithoutApostropheRootFinder, TextNumeralRootFinder from trnltk.morphology.model import formatter from trnltk.morphology.morphotactics.basicsuffixgraph import BasicSuffixGraph from trnltk.morphology.morphotactics.copulasuffixgraph import CopulaSuffixGraph from trnltk.morphology.morphotactics.numeralsuffixgraph import NumeralSuffixGraph from trnltk.morphology.morphotactics.predefinedpaths import PredefinedPaths from trnltk.morphology.lexicon.lexiconloader import LexiconLoader from trnltk.morphology.lexicon.rootgenerator import RootGenerator, RootMapGenerator from trnltk.morphology.morphotactics.propernounsuffixgraph import ProperNounSuffixGraph from trnltk.morphology.contextful.likelihoodmetrics.wordformcollocation.contextparsingcalculator import query_logger class BaseContextlessDistributionCalculatorTest(object): @classmethod def setUpClass(cls): all_roots = [] lexemes = LexiconLoader.load_from_file(os.path.join(os.path.dirname(__file__), '../../../../../resources/master_dictionary.txt')) for di in lexemes: all_roots.extend(RootGenerator.generate(di)) root_map_generator = RootMapGenerator() cls.root_map = root_map_generator.generate(all_roots) suffix_graph = CopulaSuffixGraph(NumeralSuffixGraph(ProperNounSuffixGraph(BasicSuffixGraph()))) suffix_graph.initialize() predefined_paths = PredefinedPaths(cls.root_map, suffix_graph) predefined_paths.create_predefined_paths() word_root_finder = WordRootFinder(cls.root_map) digit_numeral_root_finder = DigitNumeralRootFinder() text_numeral_root_finder = TextNumeralRootFinder(cls.root_map) proper_noun_from_apostrophe_root_finder = ProperNounFromApostropheRootFinder() proper_noun_without_apostrophe_root_finder = ProperNounWithoutApostropheRootFinder() cls.contextless_parser = ContextlessMorphologicalParser(suffix_graph, predefined_paths, [word_root_finder, digit_numeral_root_finder, text_numeral_root_finder, proper_noun_from_apostrophe_root_finder, proper_noun_without_apostrophe_root_finder]) 
mongodb_connection = pymongo.Connection(host='127.0.0.1') collection_map = { 1: mongodb_connection['trnltk']['wordUnigrams{}'.format(cls.parseset_index)] } database_index_builder = DatabaseIndexBuilder(collection_map) target_form_given_context_counter = TargetFormGivenContextCounter(collection_map) smoother = CachedContextlessDistributionSmoother() smoother.initialize() cls.calculator = ContextlessDistributionCalculator(database_index_builder, target_form_given_context_counter, smoother) cls.calculator.build_indexes() def setUp(self): logging.basicConfig(level=logging.INFO) query_logger.setLevel(logging.INFO) def _test_calculate(self, surface): results = self.contextless_parser.parse(surface) likelihoods = [] for result in results: formatted_parse_result = formatter.format_morpheme_container_for_parseset(result) formatted_parse_result_likelihood = self.calculator.calculate(result) likelihoods.append((formatted_parse_result, formatted_parse_result_likelihood)) pprint.pprint(likelihoods) def test_calculate_without_ambiguity(self): self._test_calculate(u'masa') self._test_calculate(u'kitap') self._test_calculate(u'deri') def test_calculate_with_ambiguity(self): self._test_calculate(u'onun') self._test_calculate(u'erkek') self._test_calculate(u'bir') def test_calculate_with_unparsable(self): self._test_calculate(u'asdasd') def test_calculate_with_non_existing(self): self._test_calculate(u'gelircesine') class ContextlessDistributionCalculatorTestForParseSet001(unittest.TestCase, BaseContextlessDistributionCalculatorTest): @classmethod def setUpClass(cls): BaseContextlessDistributionCalculatorTest.parseset_index = "001" BaseContextlessDistributionCalculatorTest.setUpClass() def test_calculate_with_unparsable(self): super(ContextlessDistributionCalculatorTestForParseSet001, self).test_calculate_with_unparsable() def test_calculate_with_ambiguity(self): super(ContextlessDistributionCalculatorTestForParseSet001, self).test_calculate_with_ambiguity() def test_calculate_without_ambiguity(self): super(ContextlessDistributionCalculatorTestForParseSet001, self).test_calculate_without_ambiguity() def test_calculate_with_non_existing(self): super(ContextlessDistributionCalculatorTestForParseSet001, self).test_calculate_with_non_existing() class ContextlessDistributionCalculatorTestForParseSet999(unittest.TestCase, BaseContextlessDistributionCalculatorTest): @classmethod def setUpClass(cls): BaseContextlessDistributionCalculatorTest.parseset_index = "999" BaseContextlessDistributionCalculatorTest.setUpClass() if __name__ == '__main__': unittest.main()
{ "content_hash": "ee390bc49758be9afe803d44d80d35ab", "timestamp": "", "source": "github", "line_count": 142, "max_line_length": 201, "avg_line_length": 47.04929577464789, "alnum_prop": 0.7747343212093998, "repo_name": "aliok/trnltk", "id": "3b729949134ea98c4fbba75bca15fd8570289491", "size": "6696", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "trnltk/morphology/contextful/likelihoodmetrics/contextlessdistribution/test/test_contextlessdistributioncalculator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "60232" }, { "name": "Python", "bytes": "1320401" }, { "name": "Shell", "bytes": "2191" } ], "symlink_target": "" }
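Stripped of the unittest scaffolding, _test_calculate above reduces to: parse the surface form, format each parse, and ask the calculator for its likelihood. A sketch of that loop, assuming `parser` and `calculator` stand for the objects built in setUpClass (so a populated MongoDB is still required).

from trnltk.morphology.model import formatter

def likelihoods_for(surface, parser, calculator):
    # One (formatted parse, likelihood) pair per morphological analysis.
    pairs = []
    for parse_result in parser.parse(surface):
        formatted = formatter.format_morpheme_container_for_parseset(parse_result)
        pairs.append((formatted, calculator.calculate(parse_result)))
    return pairs

for formatted, likelihood in likelihoods_for(u'masa', parser, calculator):
    print formatted, likelihood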
# Form implementation generated from reading ui file 'LearningSetup.ui' # # Created by: PyQt5 UI code generator 5.7 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.resize(1000, 705) Dialog.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) self.trainingPatternMatching_GroupBox = QtWidgets.QGroupBox(Dialog) self.trainingPatternMatching_GroupBox.setEnabled(True) self.trainingPatternMatching_GroupBox.setGeometry(QtCore.QRect(5, 155, 490, 511)) self.trainingPatternMatching_GroupBox.setObjectName("trainingPatternMatching_GroupBox") self.trainingPatternPack_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingPatternPack_Label.setGeometry(QtCore.QRect(10, 164, 61, 16)) self.trainingPatternPack_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingPatternPack_Label.setObjectName("trainingPatternPack_Label") self.trainingProcess_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingProcess_Label.setGeometry(QtCore.QRect(250, 164, 61, 16)) self.trainingProcess_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingProcess_Label.setObjectName("trainingProcess_Label") self.trainingPatternToOrderInformation_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingPatternToOrderInformation_Label.setGeometry(QtCore.QRect(10, 220, 160, 16)) self.trainingPatternToOrderInformation_Label.setObjectName("trainingPatternToOrderInformation_Label") self.trainingPattern_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingPattern_Label.setGeometry(QtCore.QRect(250, 250, 60, 13)) self.trainingPattern_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingPattern_Label.setObjectName("trainingPattern_Label") self.trainingOrder_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingOrder_Label.setGeometry(QtCore.QRect(250, 280, 60, 13)) self.trainingOrder_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingOrder_Label.setObjectName("trainingOrder_Label") self.trainingPatternPack_ComboBox = QtWidgets.QComboBox(self.trainingPatternMatching_GroupBox) self.trainingPatternPack_ComboBox.setEnabled(False) self.trainingPatternPack_ComboBox.setGeometry(QtCore.QRect(80, 160, 160, 22)) self.trainingPatternPack_ComboBox.setObjectName("trainingPatternPack_ComboBox") self.trainingProcess_ComboBox = QtWidgets.QComboBox(self.trainingPatternMatching_GroupBox) self.trainingProcess_ComboBox.setEnabled(False) self.trainingProcess_ComboBox.setGeometry(QtCore.QRect(320, 160, 160, 22)) self.trainingProcess_ComboBox.setObjectName("trainingProcess_ComboBox") self.trainingPattern_ComboBox = QtWidgets.QComboBox(self.trainingPatternMatching_GroupBox) self.trainingPattern_ComboBox.setEnabled(False) self.trainingPattern_ComboBox.setGeometry(QtCore.QRect(320, 246, 160, 22)) self.trainingPattern_ComboBox.setObjectName("trainingPattern_ComboBox") self.trainingOrder_ComboBox = QtWidgets.QComboBox(self.trainingPatternMatching_GroupBox) self.trainingOrder_ComboBox.setEnabled(False) self.trainingOrder_ComboBox.setGeometry(QtCore.QRect(320, 276, 160, 22)) self.trainingOrder_ComboBox.setObjectName("trainingOrder_ComboBox") self.trainingPatternToOrderAssign_Button = 
QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternToOrderAssign_Button.setEnabled(False) self.trainingPatternToOrderAssign_Button.setGeometry(QtCore.QRect(400, 310, 81, 30)) self.trainingPatternToOrderAssign_Button.setObjectName("trainingPatternToOrderAssign_Button") self.trainingPatternMatchingDelete_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternMatchingDelete_Button.setEnabled(False) self.trainingPatternMatchingDelete_Button.setGeometry(QtCore.QRect(40, 120, 190, 30)) self.trainingPatternMatchingDelete_Button.setObjectName("trainingPatternMatchingDelete_Button") self.trainingPatternMatchingDown_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternMatchingDown_Button.setEnabled(False) self.trainingPatternMatchingDown_Button.setGeometry(QtCore.QRect(10, 120, 30, 30)) self.trainingPatternMatchingDown_Button.setObjectName("trainingPatternMatchingDown_Button") self.trainingPatternMatchingUp_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternMatchingUp_Button.setEnabled(False) self.trainingPatternMatchingUp_Button.setGeometry(QtCore.QRect(230, 120, 30, 30)) self.trainingPatternMatchingUp_Button.setObjectName("trainingPatternMatchingUp_Button") self.trainingPatternMatchingMaking_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternMatchingMaking_Button.setEnabled(False) self.trainingPatternMatchingMaking_Button.setGeometry(QtCore.QRect(290, 40, 191, 30)) self.trainingPatternMatchingMaking_Button.setObjectName("trainingPatternMatchingMaking_Button") self.trainingPatternToOrderDelete_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternToOrderDelete_Button.setEnabled(False) self.trainingPatternToOrderDelete_Button.setGeometry(QtCore.QRect(10, 410, 250, 30)) self.trainingPatternToOrderDelete_Button.setObjectName("trainingPatternToOrderDelete_Button") self.trainingAutoAssign_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingAutoAssign_Button.setEnabled(False) self.trainingAutoAssign_Button.setGeometry(QtCore.QRect(400, 190, 81, 30)) self.trainingAutoAssign_Button.setObjectName("trainingAutoAssign_Button") self.trainingPatternMatching_Label = QtWidgets.QLabel(self.trainingPatternMatching_GroupBox) self.trainingPatternMatching_Label.setGeometry(QtCore.QRect(10, 20, 141, 16)) self.trainingPatternMatching_Label.setObjectName("trainingPatternMatching_Label") self.trainingPatternMatchingEnd_Button = QtWidgets.QPushButton(self.trainingPatternMatching_GroupBox) self.trainingPatternMatchingEnd_Button.setEnabled(False) self.trainingPatternMatchingEnd_Button.setGeometry(QtCore.QRect(290, 80, 191, 30)) self.trainingPatternMatchingEnd_Button.setObjectName("trainingPatternMatchingEnd_Button") self.trainingPatternMatching_ListWidget = QtWidgets.QListWidget(self.trainingPatternMatching_GroupBox) self.trainingPatternMatching_ListWidget.setGeometry(QtCore.QRect(10, 40, 250, 71)) self.trainingPatternMatching_ListWidget.setObjectName("trainingPatternMatching_ListWidget") self.trainingPatternToOrderInformation_ListWidget = QtWidgets.QListWidget(self.trainingPatternMatching_GroupBox) self.trainingPatternToOrderInformation_ListWidget.setGeometry(QtCore.QRect(10, 240, 250, 161)) self.trainingPatternToOrderInformation_ListWidget.setObjectName("trainingPatternToOrderInformation_ListWidget") self.learningSetup_GroupBox = QtWidgets.QGroupBox(Dialog) 
self.learningSetup_GroupBox.setGeometry(QtCore.QRect(5, 10, 491, 141)) self.learningSetup_GroupBox.setObjectName("learningSetup_GroupBox") self.learningSetupDelete_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupDelete_Button.setGeometry(QtCore.QRect(40, 100, 171, 30)) self.learningSetupDelete_Button.setObjectName("learningSetupDelete_Button") self.learningSetupDown_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupDown_Button.setGeometry(QtCore.QRect(10, 100, 30, 30)) self.learningSetupDown_Button.setObjectName("learningSetupDown_Button") self.learningSetupUp_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupUp_Button.setGeometry(QtCore.QRect(210, 100, 30, 30)) self.learningSetupUp_Button.setObjectName("learningSetupUp_Button") self.learningSetupMaking_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupMaking_Button.setGeometry(QtCore.QRect(260, 65, 100, 30)) self.learningSetupMaking_Button.setObjectName("learningSetupMaking_Button") self.learningSetupEnd_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupEnd_Button.setEnabled(False) self.learningSetupEnd_Button.setGeometry(QtCore.QRect(320, 100, 100, 30)) self.learningSetupEnd_Button.setObjectName("learningSetupEnd_Button") self.learningSetupModify_Button = QtWidgets.QPushButton(self.learningSetup_GroupBox) self.learningSetupModify_Button.setGeometry(QtCore.QRect(380, 65, 100, 30)) self.learningSetupModify_Button.setObjectName("learningSetupModify_Button") self.learningSetupName_Label = QtWidgets.QLabel(self.learningSetup_GroupBox) self.learningSetupName_Label.setGeometry(QtCore.QRect(260, 20, 101, 16)) self.learningSetupName_Label.setObjectName("learningSetupName_Label") self.learningSetupName_LineEdit = QtWidgets.QLineEdit(self.learningSetup_GroupBox) self.learningSetupName_LineEdit.setGeometry(QtCore.QRect(260, 40, 221, 20)) self.learningSetupName_LineEdit.setObjectName("learningSetupName_LineEdit") self.learningSetup_ListWidget = QtWidgets.QListWidget(self.learningSetup_GroupBox) self.learningSetup_ListWidget.setGeometry(QtCore.QRect(10, 20, 231, 71)) self.learningSetup_ListWidget.setObjectName("learningSetup_ListWidget") self.trainingAndTestMethod_GroupBox = QtWidgets.QGroupBox(Dialog) self.trainingAndTestMethod_GroupBox.setEnabled(True) self.trainingAndTestMethod_GroupBox.setGeometry(QtCore.QRect(505, 10, 491, 141)) self.trainingAndTestMethod_GroupBox.setObjectName("trainingAndTestMethod_GroupBox") self.shufflingMethod_ComboBox = QtWidgets.QComboBox(self.trainingAndTestMethod_GroupBox) self.shufflingMethod_ComboBox.setEnabled(False) self.shufflingMethod_ComboBox.setGeometry(QtCore.QRect(110, 110, 260, 22)) self.shufflingMethod_ComboBox.setObjectName("shufflingMethod_ComboBox") self.shufflingMethod_ComboBox.addItem("") self.shufflingMethod_ComboBox.addItem("") self.shufflingMethod_ComboBox.addItem("") self.shufflingMethod_ComboBox.addItem("") self.shufflingMethod_ComboBox.addItem("") self.shufflingMethod_Label = QtWidgets.QLabel(self.trainingAndTestMethod_GroupBox) self.shufflingMethod_Label.setGeometry(QtCore.QRect(10, 114, 90, 13)) self.shufflingMethod_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.shufflingMethod_Label.setObjectName("shufflingMethod_Label") self.testTiming_Label = QtWidgets.QLabel(self.trainingAndTestMethod_GroupBox) self.testTiming_Label.setGeometry(QtCore.QRect(10, 54, 90, 13)) 
self.testTiming_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.testTiming_Label.setObjectName("testTiming_Label") self.trainingEpoch_Label = QtWidgets.QLabel(self.trainingAndTestMethod_GroupBox) self.trainingEpoch_Label.setGeometry(QtCore.QRect(10, 24, 90, 13)) self.trainingEpoch_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingEpoch_Label.setObjectName("trainingEpoch_Label") self.miniBatchSize_Label = QtWidgets.QLabel(self.trainingAndTestMethod_GroupBox) self.miniBatchSize_Label.setGeometry(QtCore.QRect(10, 84, 90, 13)) self.miniBatchSize_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.miniBatchSize_Label.setObjectName("miniBatchSize_Label") self.trainingEpoch_LineEdit = QtWidgets.QLineEdit(self.trainingAndTestMethod_GroupBox) self.trainingEpoch_LineEdit.setEnabled(False) self.trainingEpoch_LineEdit.setGeometry(QtCore.QRect(110, 20, 120, 20)) self.trainingEpoch_LineEdit.setFocusPolicy(QtCore.Qt.StrongFocus) self.trainingEpoch_LineEdit.setText("") self.trainingEpoch_LineEdit.setFrame(True) self.trainingEpoch_LineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.trainingEpoch_LineEdit.setReadOnly(False) self.trainingEpoch_LineEdit.setObjectName("trainingEpoch_LineEdit") self.testTiming_LineEdit = QtWidgets.QLineEdit(self.trainingAndTestMethod_GroupBox) self.testTiming_LineEdit.setEnabled(False) self.testTiming_LineEdit.setGeometry(QtCore.QRect(110, 50, 120, 20)) self.testTiming_LineEdit.setFocusPolicy(QtCore.Qt.StrongFocus) self.testTiming_LineEdit.setText("") self.testTiming_LineEdit.setFrame(True) self.testTiming_LineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.testTiming_LineEdit.setReadOnly(False) self.testTiming_LineEdit.setObjectName("testTiming_LineEdit") self.miniBatchSize_LineEdit = QtWidgets.QLineEdit(self.trainingAndTestMethod_GroupBox) self.miniBatchSize_LineEdit.setEnabled(False) self.miniBatchSize_LineEdit.setGeometry(QtCore.QRect(110, 80, 120, 20)) self.miniBatchSize_LineEdit.setFocusPolicy(QtCore.Qt.StrongFocus) self.miniBatchSize_LineEdit.setText("") self.miniBatchSize_LineEdit.setFrame(True) self.miniBatchSize_LineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.miniBatchSize_LineEdit.setReadOnly(False) self.miniBatchSize_LineEdit.setObjectName("miniBatchSize_LineEdit") self.load_Button = QtWidgets.QPushButton(Dialog) self.load_Button.setGeometry(QtCore.QRect(420, 670, 160, 30)) self.load_Button.setObjectName("load_Button") self.save_Button = QtWidgets.QPushButton(Dialog) self.save_Button.setGeometry(QtCore.QRect(200, 670, 160, 30)) self.save_Button.setObjectName("save_Button") self.exit_Button = QtWidgets.QPushButton(Dialog) self.exit_Button.setGeometry(QtCore.QRect(640, 670, 160, 30)) self.exit_Button.setObjectName("exit_Button") self.testPatternMatching_GroupBox = QtWidgets.QGroupBox(Dialog) self.testPatternMatching_GroupBox.setEnabled(True) self.testPatternMatching_GroupBox.setGeometry(QtCore.QRect(505, 155, 490, 511)) self.testPatternMatching_GroupBox.setObjectName("testPatternMatching_GroupBox") self.testPatternPack_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testPatternPack_Label.setGeometry(QtCore.QRect(10, 164, 61, 16)) self.testPatternPack_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) 
self.testPatternPack_Label.setObjectName("testPatternPack_Label") self.testProcess_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testProcess_Label.setGeometry(QtCore.QRect(250, 164, 61, 16)) self.testProcess_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.testProcess_Label.setObjectName("testProcess_Label") self.testPatternToOrderInformation_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testPatternToOrderInformation_Label.setGeometry(QtCore.QRect(10, 220, 160, 16)) self.testPatternToOrderInformation_Label.setObjectName("testPatternToOrderInformation_Label") self.testPattern_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testPattern_Label.setGeometry(QtCore.QRect(250, 250, 60, 13)) self.testPattern_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.testPattern_Label.setObjectName("testPattern_Label") self.testOrder_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testOrder_Label.setGeometry(QtCore.QRect(250, 280, 60, 13)) self.testOrder_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.testOrder_Label.setObjectName("testOrder_Label") self.testPatternPack_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.testPatternPack_ComboBox.setEnabled(False) self.testPatternPack_ComboBox.setGeometry(QtCore.QRect(80, 160, 160, 22)) self.testPatternPack_ComboBox.setObjectName("testPatternPack_ComboBox") self.testProcess_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.testProcess_ComboBox.setEnabled(False) self.testProcess_ComboBox.setGeometry(QtCore.QRect(320, 160, 160, 22)) self.testProcess_ComboBox.setObjectName("testProcess_ComboBox") self.testPattern_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.testPattern_ComboBox.setEnabled(False) self.testPattern_ComboBox.setGeometry(QtCore.QRect(320, 246, 160, 22)) self.testPattern_ComboBox.setObjectName("testPattern_ComboBox") self.testOrder_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.testOrder_ComboBox.setEnabled(False) self.testOrder_ComboBox.setGeometry(QtCore.QRect(320, 276, 160, 22)) self.testOrder_ComboBox.setObjectName("testOrder_ComboBox") self.testPatternToOrderAssign_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testPatternToOrderAssign_Button.setEnabled(False) self.testPatternToOrderAssign_Button.setGeometry(QtCore.QRect(400, 310, 81, 30)) self.testPatternToOrderAssign_Button.setObjectName("testPatternToOrderAssign_Button") self.testPatternMatchingDelete_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testPatternMatchingDelete_Button.setEnabled(False) self.testPatternMatchingDelete_Button.setGeometry(QtCore.QRect(10, 120, 250, 30)) self.testPatternMatchingDelete_Button.setObjectName("testPatternMatchingDelete_Button") self.testPatternMatchingMaking_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testPatternMatchingMaking_Button.setEnabled(False) self.testPatternMatchingMaking_Button.setGeometry(QtCore.QRect(290, 40, 191, 30)) self.testPatternMatchingMaking_Button.setObjectName("testPatternMatchingMaking_Button") self.testPatternToOrderDelete_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testPatternToOrderDelete_Button.setEnabled(False) self.testPatternToOrderDelete_Button.setGeometry(QtCore.QRect(10, 310, 250, 30)) 
self.testPatternToOrderDelete_Button.setObjectName("testPatternToOrderDelete_Button") self.testAutoAssign_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testAutoAssign_Button.setEnabled(False) self.testAutoAssign_Button.setGeometry(QtCore.QRect(400, 190, 81, 30)) self.testAutoAssign_Button.setObjectName("testAutoAssign_Button") self.testPatternMatching_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.testPatternMatching_Label.setGeometry(QtCore.QRect(10, 20, 141, 16)) self.testPatternMatching_Label.setObjectName("testPatternMatching_Label") self.testPatternMatchingEnd_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.testPatternMatchingEnd_Button.setEnabled(False) self.testPatternMatchingEnd_Button.setGeometry(QtCore.QRect(290, 80, 191, 30)) self.testPatternMatchingEnd_Button.setObjectName("testPatternMatchingEnd_Button") self.testPatternMatching_ListWidget = QtWidgets.QListWidget(self.testPatternMatching_GroupBox) self.testPatternMatching_ListWidget.setGeometry(QtCore.QRect(10, 40, 250, 71)) self.testPatternMatching_ListWidget.setObjectName("testPatternMatching_ListWidget") self.testPatternToOrderInformation_ListWidget = QtWidgets.QListWidget(self.testPatternMatching_GroupBox) self.testPatternToOrderInformation_ListWidget.setGeometry(QtCore.QRect(10, 240, 250, 61)) self.testPatternToOrderInformation_ListWidget.setObjectName("testPatternToOrderInformation_ListWidget") self.extractDataDelete_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.extractDataDelete_Button.setEnabled(False) self.extractDataDelete_Button.setGeometry(QtCore.QRect(10, 470, 250, 30)) self.extractDataDelete_Button.setObjectName("extractDataDelete_Button") self.extractData_ListWidget = QtWidgets.QListWidget(self.testPatternMatching_GroupBox) self.extractData_ListWidget.setGeometry(QtCore.QRect(10, 370, 250, 91)) self.extractData_ListWidget.setObjectName("extractData_ListWidget") self.extractData_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.extractData_Label.setGeometry(QtCore.QRect(10, 350, 160, 16)) self.extractData_Label.setObjectName("extractData_Label") self.extractDataAssign_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.extractDataAssign_Button.setEnabled(False) self.extractDataAssign_Button.setGeometry(QtCore.QRect(400, 470, 81, 30)) self.extractDataAssign_Button.setObjectName("extractDataAssign_Button") self.extractDataOrder_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.extractDataOrder_ComboBox.setEnabled(False) self.extractDataOrder_ComboBox.setGeometry(QtCore.QRect(320, 406, 160, 22)) self.extractDataOrder_ComboBox.setObjectName("extractDataOrder_ComboBox") self.extractDataOrder_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.extractDataOrder_Label.setGeometry(QtCore.QRect(250, 410, 60, 13)) self.extractDataOrder_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.extractDataOrder_Label.setObjectName("extractDataOrder_Label") self.extractDataPattern_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.extractDataPattern_ComboBox.setEnabled(False) self.extractDataPattern_ComboBox.setGeometry(QtCore.QRect(320, 376, 160, 22)) self.extractDataPattern_ComboBox.setObjectName("extractDataPattern_ComboBox") self.extractDataPattern_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.extractDataPattern_Label.setGeometry(QtCore.QRect(250, 380, 60, 13)) 
self.extractDataPattern_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.extractDataPattern_Label.setObjectName("extractDataPattern_Label") self.extractDataType_ComboBox = QtWidgets.QComboBox(self.testPatternMatching_GroupBox) self.extractDataType_ComboBox.setEnabled(False) self.extractDataType_ComboBox.setGeometry(QtCore.QRect(320, 440, 160, 22)) self.extractDataType_ComboBox.setObjectName("extractDataType_ComboBox") self.extractDataType_ComboBox.addItem("") self.extractDataType_ComboBox.addItem("") self.extractDataType_ComboBox.addItem("") self.extractDataType_ComboBox.addItem("") self.extractDataType_Label = QtWidgets.QLabel(self.testPatternMatching_GroupBox) self.extractDataType_Label.setGeometry(QtCore.QRect(250, 444, 60, 13)) self.extractDataType_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.extractDataType_Label.setObjectName("extractDataType_Label") self.extractDataAutoAssign_Button = QtWidgets.QPushButton(self.testPatternMatching_GroupBox) self.extractDataAutoAssign_Button.setEnabled(False) self.extractDataAutoAssign_Button.setGeometry(QtCore.QRect(300, 470, 81, 30)) self.extractDataAutoAssign_Button.setObjectName("extractDataAutoAssign_Button") self.retranslateUi(Dialog) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): _translate = QtCore.QCoreApplication.translate Dialog.setWindowTitle(_translate("Dialog", "Learning Setup")) self.trainingPatternMatching_GroupBox.setTitle(_translate("Dialog", "Training Pattern Matching")) self.trainingPatternPack_Label.setText(_translate("Dialog", "Pattern Pack")) self.trainingProcess_Label.setText(_translate("Dialog", "Process")) self.trainingPatternToOrderInformation_Label.setText(_translate("Dialog", "Order ← Pattern Information")) self.trainingPattern_Label.setText(_translate("Dialog", "Pattern")) self.trainingOrder_Label.setText(_translate("Dialog", "Order")) self.trainingPatternToOrderAssign_Button.setText(_translate("Dialog", "Assign")) self.trainingPatternMatchingDelete_Button.setText(_translate("Dialog", "Delete")) self.trainingPatternMatchingDown_Button.setText(_translate("Dialog", "▼")) self.trainingPatternMatchingUp_Button.setText(_translate("Dialog", "▲")) self.trainingPatternMatchingMaking_Button.setText(_translate("Dialog", "Making")) self.trainingPatternToOrderDelete_Button.setText(_translate("Dialog", "Delete")) self.trainingAutoAssign_Button.setText(_translate("Dialog", "Auto Assign")) self.trainingPatternMatching_Label.setText(_translate("Dialog", "Pattern Matching Information")) self.trainingPatternMatchingEnd_Button.setText(_translate("Dialog", "End")) self.learningSetup_GroupBox.setTitle(_translate("Dialog", "Learning Setup")) self.learningSetupDelete_Button.setText(_translate("Dialog", "Delete")) self.learningSetupDown_Button.setText(_translate("Dialog", "▼")) self.learningSetupUp_Button.setText(_translate("Dialog", "▲")) self.learningSetupMaking_Button.setText(_translate("Dialog", "Making")) self.learningSetupEnd_Button.setText(_translate("Dialog", "End")) self.learningSetupModify_Button.setText(_translate("Dialog", "Modify")) self.learningSetupName_Label.setText(_translate("Dialog", "Learning Setup Name")) self.trainingAndTestMethod_GroupBox.setTitle(_translate("Dialog", "Training && Test Method")) self.shufflingMethod_ComboBox.setItemText(0, _translate("Dialog", "Random all")) self.shufflingMethod_ComboBox.setItemText(1, _translate("Dialog", "Matching: Random, Pattern: Random")) 
self.shufflingMethod_ComboBox.setItemText(2, _translate("Dialog", "Matching: Sequence, Pattern: Random"))
        self.shufflingMethod_ComboBox.setItemText(3, _translate("Dialog", "Matching: Random, Pattern: Sequence"))
        self.shufflingMethod_ComboBox.setItemText(4, _translate("Dialog", "Matching: Sequence, Pattern: Sequence"))
        self.shufflingMethod_Label.setText(_translate("Dialog", "Shuffling Method"))
        self.testTiming_Label.setText(_translate("Dialog", "Test Timing"))
        self.trainingEpoch_Label.setText(_translate("Dialog", "Training Epoch"))
        self.miniBatchSize_Label.setText(_translate("Dialog", "Mini-Batch Size"))
        self.load_Button.setText(_translate("Dialog", "Learning Setup Load"))
        self.save_Button.setText(_translate("Dialog", "Learning Setup Save"))
        self.exit_Button.setText(_translate("Dialog", "Exit"))
        self.testPatternMatching_GroupBox.setTitle(_translate("Dialog", "Test Pattern Matching"))
        self.testPatternPack_Label.setText(_translate("Dialog", "Pattern Pack"))
        self.testProcess_Label.setText(_translate("Dialog", "Process"))
        self.testPatternToOrderInformation_Label.setText(_translate("Dialog", "Order ← Pattern Information"))
        self.testPattern_Label.setText(_translate("Dialog", "Pattern"))
        self.testOrder_Label.setText(_translate("Dialog", "Order"))
        self.testPatternToOrderAssign_Button.setText(_translate("Dialog", "Assign"))
        self.testPatternMatchingDelete_Button.setText(_translate("Dialog", "Delete"))
        self.testPatternMatchingMaking_Button.setText(_translate("Dialog", "Making"))
        self.testPatternToOrderDelete_Button.setText(_translate("Dialog", "Delete"))
        self.testAutoAssign_Button.setText(_translate("Dialog", "Auto Assign"))
        self.testPatternMatching_Label.setText(_translate("Dialog", "Pattern Matching Information"))
        self.testPatternMatchingEnd_Button.setText(_translate("Dialog", "End"))
        self.extractDataDelete_Button.setText(_translate("Dialog", "Delete"))
        self.extractData_Label.setText(_translate("Dialog", "Extract Data from Activation"))
        self.extractDataAssign_Button.setText(_translate("Dialog", "Assign"))
        self.extractDataOrder_Label.setText(_translate("Dialog", "Order"))
        self.extractDataPattern_Label.setText(_translate("Dialog", "Pattern"))
        self.extractDataType_ComboBox.setItemText(0, _translate("Dialog", "Raw Activation"))
        self.extractDataType_ComboBox.setItemText(1, _translate("Dialog", "Mean Squared Error"))
        self.extractDataType_ComboBox.setItemText(2, _translate("Dialog", "Cross Entropy"))
        self.extractDataType_ComboBox.setItemText(3, _translate("Dialog", "Semantic Stress"))
        self.extractDataType_Label.setText(_translate("Dialog", "Ext. Type"))
        self.extractDataAutoAssign_Button.setText(_translate("Dialog", "Auto Assign"))


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
{ "content_hash": "30c788e49248dc0343ca6d11444f9c46", "timestamp": "", "source": "github", "line_count": 386, "max_line_length": 120, "avg_line_length": 79.67357512953367, "alnum_prop": 0.7511543213890876, "repo_name": "CODEJIN/HNet_on_Tensorflow", "id": "ee48de9428dbe265f004cdb96d888e57b25a184d", "size": "30791", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "QT_UI/LearningSetup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "459" }, { "name": "Python", "bytes": "2370418" } ], "symlink_target": "" }
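Because the module above is generated by pyuic5 (its header warns that manual changes will be lost), application logic normally lives in a thin wrapper that instantiates Ui_Dialog instead of editing the generated file. A minimal sketch of that convention, assuming the generated module is importable as LearningSetup:

import sys

from PyQt5 import QtWidgets

from LearningSetup import Ui_Dialog  # assumed module name for the generated code


class LearningSetupDialog(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(LearningSetupDialog, self).__init__(parent)
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Connect signals here rather than inside the generated module.
        self.ui.exit_Button.clicked.connect(self.reject)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    dialog = LearningSetupDialog()
    dialog.show()
    sys.exit(app.exec_())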
"""Generate Graph Embedding using Node2Vec.""" import os from absl import app from absl import flags from absl import logging import utils # pylint: disable=g-bad-import-order import utils_gcs # pylint: disable=g-bad-import-order FLAGS = flags.FLAGS flags.DEFINE_string('gcs_path_in', None, 'gcs bucket input directory') flags.DEFINE_string('gcs_path_out', None, 'gcs bucket output directory') flags.DEFINE_string('local_path', './fake_input/', 'graph csv/gpickle file') # Graph embedding parameters flags.DEFINE_string('load_method', 'csv', 'csv, gpickle') flags.DEFINE_string('g_file', '', 'graph csv/gpickle file') flags.DEFINE_integer('dim', 128, 'graph embedding dimension') flags.DEFINE_integer('walk_len', 30, 'walk length') flags.DEFINE_integer('num_walk', 100, 'number of walks') flags.DEFINE_integer('workers', 4, 'number of workers') flags.DEFINE_integer('win_size', 10, 'window size') flags.DEFINE_string('g_emb', '', 'graph embedding file') def main(_): if not os.path.exists(FLAGS.local_path): utils_gcs.download_files_from_gcs(FLAGS.local_path, FLAGS.gcs_path_in) logging.info('Data downloaded successfully!') graph = utils.load_graph(os.path.join(FLAGS.local_path, FLAGS.g_file), load_method=FLAGS.load_method, directed=False) _ = utils.get_n2v_graph_embedding( os.path.join(FLAGS.local_path, FLAGS.g_emb), graph_gen=True, graph=graph, dimensions=FLAGS.dim, walk_length=FLAGS.walk_len, num_walks=FLAGS.num_walk, workers=FLAGS.workers, win_size=FLAGS.win_size, normalize_type='minmax') utils_gcs.upload_files_to_gcs(local_folder=FLAGS.local_path, gcs_path=FLAGS.gcs_path_out) if __name__ == '__main__': app.run(main)
{ "content_hash": "e0a73fa32317f82aad33de0af61869ac", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 76, "avg_line_length": 38.733333333333334, "alnum_prop": 0.6970740103270223, "repo_name": "google-research/social_cascades", "id": "0e65e6471564d108bd5e0edf11006e7d0fee93ac", "size": "2319", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "news/graph_embedding_generate.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "94927" }, { "name": "Starlark", "bytes": "1215" } ], "symlink_target": "" }
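utils.get_n2v_graph_embedding is not shown in this file, so the sketch below is only an illustration of what a Node2Vec embedding step with the same flag values could look like, using the third-party node2vec package rather than the project's actual helper (normalization is omitted).

import networkx as nx
from node2vec import Node2Vec

def embed_graph(graph, dim=128, walk_len=30, num_walk=100, workers=4, win_size=10):
    # Random-walk sampling followed by skip-gram training, mirroring the
    # dim/walk_len/num_walk/workers/win_size flags defined above.
    n2v = Node2Vec(graph, dimensions=dim, walk_length=walk_len,
                   num_walks=num_walk, workers=workers)
    model = n2v.fit(window=win_size, min_count=1)
    # One embedding vector per node, keyed by the node id as a string.
    return {str(node): model.wv[str(node)] for node in graph.nodes()}

embeddings = embed_graph(nx.karate_club_graph())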
import os import unittest from microflack_common.test import FlackTestCase os.environ['FLASK_CONFIG'] = 'test' from app import app class UITests(FlackTestCase): def setUp(self): self.ctx = app.app_context() self.ctx.push() self.client = app.test_client() def tearDown(self): self.ctx.pop() def test_index_page(self): r, s, h = self.get('/') self.assertTrue(h['Content-Type'].startswith('text/html')) if __name__ == '__main__': unittest.main(verbosity=2)
{ "content_hash": "0844dd7c80623b161e7b68ad87766e04", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 66, "avg_line_length": 21.04, "alnum_prop": 0.6216730038022814, "repo_name": "miguelgrinberg/microflack_ui", "id": "4d1c24f7d5afd389c42edfda34943ffec79fdb6a", "size": "548", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "8376" }, { "name": "Dockerfile", "bytes": "309" }, { "name": "HTML", "bytes": "3947" }, { "name": "JavaScript", "bytes": "17941" }, { "name": "Python", "bytes": "1545" }, { "name": "Shell", "bytes": "796" } ], "symlink_target": "" }
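The FlackTestCase helper used above unpacks self.get('/') into three values, and only the third (the headers) is exercised here. If the first two are the response body and status code, which is an assumption since FlackTestCase is defined in microflack_common, an additional check inside UITests could look like:

    def test_index_page_status(self):
        # Hypothetical extra test: also verify the HTTP status code.
        r, s, h = self.get('/')
        self.assertEqual(s, 200)
        self.assertTrue(h['Content-Type'].startswith('text/html'))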
import traceback from cStringIO import StringIO from lxml import etree as ET from lxml import etree import datetime import logging.handlers import logging from foam.config import LOGDIR, LOGLEVEL, LOGFORMAT lhandle = logging.handlers.RotatingFileHandler('%s/gapi-actions.log' % (LOGDIR), maxBytes=1000000) lhandle.setLevel(LOGLEVEL) lhandle.setFormatter(logging.Formatter(LOGFORMAT)) actionlog = logging.getLogger('gapi-actions') actionlog.addHandler(lhandle) actionlog.setLevel(LOGLEVEL) import foam.task import foam.openflow.types from foam.flowvisor import Connection as FV from foam.core.json import jsonify, jsonValidate, JSONValidationError from foam.core.exception import CoreException from foam.creds import CredVerifier from foam.lib import NoGroupName, VirtualLink, FlowSpec, _asUTC from foam.core.configdb import ConfigDB from foam.geni.db import GeniDB, getManagerID, generateSwitchComponentID, UnknownSliver from foam.geni.topology import TopoDB import foam.geni.approval OFNSv3 = "/opt/ofelia/ofam/local/schemas" OFNSv4 = "/opt/ofelia/ofam/local/schemas" PGNS = "/opt/ofelia/ofam/local/schemas" XSNS = "http://www.w3.org/2001/XMLSchema-instance" def deleteSliver (slice_urn = None, sliver_urn = None): slice_name = GeniDB.getFlowvisorSliceName(slice_urn=slice_urn, sliver_urn = sliver_urn) if FV.sliceExists(slice_name): # stats = FV.getCombinedStats(slice_name) # GeniDB.insertFinalStats(slice_urn, stats) FV.deleteSlice(slice_name) GeniDB.deleteSliver(slice_urn=slice_urn, sliver_urn=sliver_urn) foam.geni.approval.rebuildDB() def siteTagChange (key, value): flog = logging.getLogger('foam') flog.warning("Changing site tag to '%s' - regenerating all datapath URNs" % (value)) GeniDB.rebuildDatapathURNs(value) return value def updateMaxLease (key, value): if type(value) is not int: value = int(value) flog = logging.getLogger('foam') tval = datetime.timedelta(hours=value) flog.info("Changing geni.max-lease to %s" % (str(tval))) return tval def addAdDevice (rspec, dpid, active=True): switch_urn = generateSwitchComponentID(dpid) od = ET.SubElement(rspec, "{%s}datapath" % (OFNSv3)) od.attrib["component_id"] = switch_urn od.attrib["component_manager_id"] = getManagerID() od.attrib["dpid"] = dpid locdata = GeniDB.getLocationData(dpid, switch_urn) if locdata: ET.SubElement(od, "{%s}location" % (OFNSv3), country=locdata.country, latitude=locdata.lat, longitude=locdata.long) attachments = TopoDB.getDPIDAttachments(dpid) if active: ports = FV.getDevicePorts(dpid) for port in ports: if (port.features == None): p = ET.SubElement(od, "{%s}port" % (OFNSv3), num=str(port.num), name=port.name) else: p = ET.SubElement(od, "{%s}port" % (OFNSv3), num=str(port.num), name=port.name, features=port.features) for info in attachments.setdefault(port.name, []): a = ET.SubElement(p, "{%s}attachment" % (OFNSv3)) a.attrib["remote_component_id"] = info.remote_component_id a.attrib["remote_port"] = info.remote_port a.attrib["desc"] = info.desc #getLinks START def addAdLink (rspec, link): od = ET.SubElement(rspec, "{%s}link" % (OFNSv3)) od.attrib["srcDPID"] = link["srcDPID"] od.attrib["srcPort"] = link["srcPort"] od.attrib["dstDPID"] = link["dstDPID"] od.attrib["dstPort"] = link["dstPort"] #getLinks END def getAdvertisement (): NSMAP = {None: "%s" % (PGNS), "xs" : "%s" % (XSNS), "openflow" : "%s" % (OFNSv3)} rspec = ET.Element("rspec", nsmap = NSMAP) rspec.attrib["{%s}schemaLocation" % (XSNS)] = PGNS + " " \ "http://www.geni.net/resources/rspec/3/ad.xsd " + \ OFNSv3 + " " \ 
"http://www.geni.net/resources/rspec/ext/openflow/3/of-ad.xsd" rspec.attrib["type"] = "advertisement" links = FV.getLinkList() devices = FV.getDeviceList() fvversion = FV.getFVVersion() db_devices = GeniDB.getDeviceSet() GeniDB.refreshDevices(devices) for dpid in devices: db_devices.discard(dpid) addAdDevice(rspec, dpid) for dpid in db_devices: addAdDevice(rspec, dpid, False) #getLinks START for link in links: addAdLink(rspec, link) #getLinks END xml = StringIO() ET.ElementTree(rspec).write(xml) return xml.getvalue() def approveSliver (request, logger): try: jsonValidate(request.json, [("sliver_urn", (unicode,str)), ("priority", int)], logger) if (not request.json.has_key("sliver_urn")) or (not request.json.has_key("priority")): return jsonify({"exception" : "You must specify a sliver_urn and priority"}) slice_name = GeniDB.getFlowvisorSliceName(sliver_urn=request.json["sliver_urn"]) if FV.sliceExists(slice_name): return jsonify({"Fault" : "Flowvisor slice '%s' already exists" % (slice_name)}) sobj = GeniDB.getSliverObj(request.json["sliver_urn"]) GeniDB.setSliverStatus(request.json["sliver_urn"], True) GeniDB.setSliverPriority(request.json["sliver_urn"], request.json["priority"]) GeniDB.commit() foam.geni.approval.AppData.addSliver(sobj) sobj.createSlice() sobj.insertFlowspace(request.json["priority"]) sobj.insertVirtualLink() data = GeniDB.getSliverData(sobj.getURN(), True) foam.task.emailApproveSliver(data) return jsonify(None) except JSONValidationError, e: jd = e.__json__() return jsonify(jd, code = 1, msg = jd["exception"]) except Exception, e: logger.exception("Exception") return jsonify(None, code = 2, msg = traceback.format_exc()) def createSliver (slice_urn, credentials, rspec, user_info): flog = logging.getLogger('foam') if GeniDB.sliceExists(slice_urn): raise DuplicateSliver(slice_urn) creds = CredVerifier.fromStrings(credentials, "createsliver", slice_urn) try: s = StringIO(rspec) rspec_dom = ET.parse(s) except Exception, exc: flog.exception("XML rspec parsing error") raise RspecParseError(slice_urn, str(exc)) of3 = open("/opt/ofelia/ofam/local/schemas/of-resv-3.xsd", "r") xsdoc3 = etree.parse(of3) xs3 = etree.XMLSchema(xsdoc3) try: xs3.assertValid(rspec_dom) except etree.DocumentInvalid, e: flog.exception("XML rspec validation error") raise RspecValidationError() rspec_elem = rspec_dom.getroot() schemas = rspec_elem.get("{%s}schemaLocation" % (XSNS)) expiration = _asUTC(datetime.datetime.utcnow()) + ConfigDB.getConfigItemByKey("geni.max-lease").getValue() for cred in creds: credexp = _asUTC(cred.expiration) if credexp < expiration: expiration = credexp GeniDB.refreshDevices() sliver = GENISliver(rspec_dom) sliver.setUserURN(user_info["urn"]) sliver.setUserEmail(user_info["email"], overwrite=False) sliver.validate() GeniDB.insertSliver(slice_urn, sliver, rspec, expiration) return sliver class IllegalEthertype(CoreException): def __init__ (self, dltype): super(IllegalEthertype, self).__init__() self.dltype = dltype def __str__ (self): return "Experimenters may not request ethertype (%s)" % (self.dltype) class NoExperimenterEmail(CoreException): def __init__ (self): super(NoExperimenterEmail, self).__init__() def __str__ (self): return "An email address was not specified in the rspec nor found in the user credential." class RspecValidationError(CoreException): def __init__ (self): pass def __str__ (self): return "XML Schema validation error parsing request rspec." 
class RspecParseError(CoreException): def __init__ (self, slice_urn, error): self.slice_urn = slice_urn self.err_msg = error def __str__ (self): return "Can't create sliver for slice %s - Exception parsing rspec: %s" % (self.slice_urn, self.err_msg) class DuplicateSliver(CoreException): def __init__ (self, urn): self.slice_urn = urn def __str__ (self): return "Sliver for slice %s already exists." % (self.slice_urn) class UnmanagedComponent(CoreException): def __init__ (self, cid): self.cid = cid def __str__ (self): return "Component (%s) is not managed by this aggregate." % (self.cid) class UnknownComponentManagerID(CoreException): def __init__ (self, cid): self.cid = cid def __str__ (self): return "Specified component manager (%s) is not this aggregate." % (self.cid) class ComponentManagerIDMismatch(CoreException): def __init__ (self, cid, cmid): self.cid = cid self.cmid = cmid def __str__ (self): return "Component ID (%s) is not a member of specified manager (%s)." % (self.cid, self.cmid) class GENIDatapath(foam.openflow.types.Datapath): def __init__ (self, dom): super(GENIDatapath, self).__init__() self.component_id = None if dom.tag == u'{%s}datapath' % (OFNSv3): self.__parse_openflowv3_datapath(dom) def __parse_openflowv3_datapath (self, dom): self.component_id = dom.get("component_id") cmid = dom.get("component_manager_id") if self.component_id.count(cmid[:-12]) != 1: raise ComponentManagerIDMismatch(self.component_id, cmid) if cmid != getManagerID(): raise UnknownComponentManagerID(self.component_id) self.dpid = GeniDB.getSwitchDPID(self.component_id) self.ports = set() for port in dom.findall('{%s}port' % (OFNSv3)): p = foam.openflow.types.Port() p.num = int(port.get("num")) p.dpid = self.dpid self.ports.add(p) def __str__ (self): return "[%s] %s\n%s\n" % (self.dpid, self.component_id, " ".join(["%s" % (str(x)) for x in self.ports])) def __json__ (self): return {"dpid" : self.dpid, "component" : self.component_id, "ports" : [x.__json__() for x in self.ports]} def getSliverStatus (sliver_urn): try: slice_name = GeniDB.getFlowvisorSliceName(sliver_urn=sliver_urn) if FV.sliceExists(slice_name): return "ready" else: return "configuring" except Exception, e: return "failed" def importSliver (opts): # Quick dirty way to find out if a sliver exists try: GeniDB.getSliverPriority(opts["sliver_urn"]) return except UnknownSliver, e: pass s = StringIO(str(opts["req_rspec"])) rspec_dom = ET.parse(s) root_elem = rspec_dom.getroot() sliver = GENISliver(rspec_dom) sliver._uuid = opts["fvslicename"] GeniDB.insertSliver(opts["slice_urn"], sliver, str(opts["req_rspec"]), opts["exp"]) GeniDB.setSliverPriority(opts["sliver_urn"], opts["priority"]) GeniDB.setSliverStatus(opts["sliver_urn"], opts["status"]) if opts["deleted"]: GeniDB.deleteSliver(sliver_urn = opts["sliver_urn"]) class GENISliver(foam.flowvisor.FSAllocation): def __init__ (self, dom): super(GENISliver, self).__init__() self.__urn = None self.__slice_urn = None self.__user_urn = None self.__ref = None self.__pend_reason = None if dom: self.__parseDatav3(dom) def __str__ (self): x = super(GENISliver, self).__str__() return "<GENI Sliver: %s\n %s, %s>\n%s" % (self.__ref, self.__urn, self.__user_urn, x) def __json__ (self): data = self.getDataDict(True) if self.__urn: dbdata = GeniDB.getSliverData(self.__urn, False) data.update(dbdata) return data def store (self): GeniDB.updateSliverObj(self.__urn, self) def json_flowspec (self): return {"groups" : [{k : [x.__json__() for x in v]} for k,v in self._groups.iteritems()], "flowspecs" : 
[x.__json__() for x in self._flowspecs]} def setPendReason (self, reason): self.__pend_reason = reason def getURN (self): return self.__urn def getSliceURN (self): return self.__slice_urn def getUserURN (self): return self.__user_urn def makeController (self, elem): c = foam.openflow.types.Controller() c.type = elem.get("type") c.url = elem.get("url") return c def __parseDatav3 (self, dom): flog = logging.getLogger('foam') sliver_dom = dom.find('{%s}sliver' % (OFNSv3)) if sliver_dom is None: flog.exception("No Sliver Tag") #raise NoSliverTag() raise Exception() self.setEmail(sliver_dom.get("email", None)) self.setDescription(sliver_dom.get("description", None)) self.__ref = sliver_dom.get("ref", None) controller_elems = sliver_dom.findall('{%s}controller' % (OFNSv3)) if controller_elems is None: raise NoControllersDefined() for elem in controller_elems: self.addController(self.makeController(elem)) groups = sliver_dom.findall('{%s}group' % (OFNSv3)) for grp in groups: dplist = [] grpname = grp.get("name") if grpname is None: raise NoGroupName() datapaths = grp.findall('{%s}datapath' % (OFNSv3)) for dp in datapaths: # try: dplist.append(GENIDatapath(dp)) # except UnmanagedComponent, e: # continue self.addGroup(grpname, dplist) matches = sliver_dom.findall('{%s}match' % (OFNSv3)) for flowspec in matches: fs = self.parseFlowSpec(flowspec, OFNSv3) self.addFlowSpec(fs) vlinks = sliver_dom.findall('{%s}vlink' % (OFNSv3)) for virtuallink in vlinks: vl = self.parseVirtualLink(virtuallink, OFNSv3) self.addVirtualLink(vl) def validate (self): super(GENISliver, self).validate() if self.getEmail() is None: raise NoExperimenterEmail() for fs in self.getFlowspecs(): for dltype in fs.getEtherTypes(): if dltype == "0x88cc": raise IllegalEthertype(dltype) def getExpiration (self): return GeniDB.getSliverExpiration(self.__urn) def emailCheck (self, now): tdw = datetime.timedelta(7) tdd = datetime.timedelta(hours=30) exp = self.getExpiration() if not self.getEmailStatus("day"): if now + tdd > exp: foam.task.emailSliverExpDay(GeniDB.getSliverData(self.__urn, True)) self.setEmailStatus("day") self.setEmailStatus("week") self.store() return (self.__urn, 1) if not self.getEmailStatus("week"): if now + tdw > exp: foam.task.emailSliverExpWeek(GeniDB.getSliverData(self.__urn, True)) self.setEmailStatus("week") self.store() return (self.__urn, 2) return (self.__urn, 0) def getDataDict (self, detail = True): obj = super(GENISliver, self).getDataDict(detail) obj["user"] = self.__user_urn obj["sliver_urn"] = self.__urn obj["ref"] = self.__ref obj["pend_reason"] = self.__pend_reason return obj def setSliverURN (self, sliver_urn): self.__urn = sliver_urn def setUserURN (self, user_urn): self.__user_urn = user_urn def setUserEmail (self, email, overwrite=False): if overwrite: self.setEmail(email) elif self.getEmail() is None: self.setEmail(email) def generateURN (self, slice_urn): self.__slice_urn = slice_urn return "%s:%s" % (slice_urn, self.getUUID()) def parseVirtualLink (self, elem, ns): vl = VirtualLink() hopsdom = elem.find("{%s}hops" % (ns)) if hopsdom is None: raise NoHopsTag(elem) #TODO: put the "use-group" stuff here linkstr = "" hops = hopsdom.findall('{%s}hop' % (ns)) for hop in hops: hopstr = hop.get("link").strip() if hop.get("index").strip() is not "1": linkstr += "," linkstr += hopstr vl.addVLinkFromString(linkstr) return vl def parseFlowSpec (self, elem, ns): fs = FlowSpec() packetdom = elem.find("{%s}packet" % (ns)) if packetdom is None: raise NoPacketTag(elem) use_groups = elem.findall('{%s}use-group' % 
(ns)) for grp in use_groups: grpname = grp.get("name") datapaths = self.getGroupDatapaths(grpname) for dp in datapaths: fs.bindDatapath(dp) nodes = elem.findall('{%s}datapath' % (ns)) for dpnode in nodes: dp = GENIDatapath(dpnode) fs.bindDatapath(dp) nodes = packetdom.findall('{%s}dl_src' % (ns)) for dls in nodes: macstr = dls.get("value").strip() fs.addDlSrcFromString(macstr) nodes = packetdom.findall('{%s}dl_dst' % (ns)) for dld in nodes: macstr = dld.get("value").strip() fs.addDlDstFromString(macstr) nodes = packetdom.findall('{%s}dl_type' % (ns)) for dlt in nodes: dltstr = dlt.get("value").strip() fs.addDlTypeFromString(dltstr) nodes = packetdom.findall('{%s}dl_vlan' % (ns)) for elem in nodes: vlidstr = elem.get("value").strip() fs.addVlanIDFromString(vlidstr) nodes = packetdom.findall('{%s}nw_src' % (ns)) for elem in nodes: nwstr = elem.get("value").strip() fs.addNwSrcFromString(nwstr) nodes = packetdom.findall('{%s}nw_dst' % (ns)) for elem in nodes: nwstr = elem.get("value").strip() fs.addNwDstFromString(nwstr) nodes = packetdom.findall('{%s}nw_proto' % (ns)) for elem in nodes: nwproto = elem.get("value").strip() fs.addNwProtoFromString(nwproto) nodes = packetdom.findall('{%s}tp_src' % (ns)) for elem in nodes: tpsrc = elem.get("value").strip() fs.addTpSrcFromString(tpsrc) nodes = packetdom.findall('{%s}tp_dst' % (ns)) for elem in nodes: tpdst = elem.get("value").strip() fs.addTpDstFromString(tpdst) return fs
{ "content_hash": "7ba4b8991172c8c52b29ed8266998ee5", "timestamp": "", "source": "github", "line_count": 554, "max_line_length": 119, "avg_line_length": 31.180505415162454, "alnum_prop": 0.6535255296978117, "repo_name": "dana-i2cat/felix", "id": "519929b3b929f9fd0f5a751f7d493b2d0b5792a6", "size": "17365", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "ofam/src/src/foam/geni/lib.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "337811" }, { "name": "DTrace", "bytes": "370" }, { "name": "Elixir", "bytes": "17243" }, { "name": "Emacs Lisp", "bytes": "1098" }, { "name": "Groff", "bytes": "1735" }, { "name": "HTML", "bytes": "660363" }, { "name": "Java", "bytes": "18362" }, { "name": "JavaScript", "bytes": "838960" }, { "name": "Makefile", "bytes": "11211" }, { "name": "Perl", "bytes": "5416" }, { "name": "Python", "bytes": "7875883" }, { "name": "Shell", "bytes": "258079" } ], "symlink_target": "" }
""" Run all test cases. """ import sys import unittest from test.test_support import requires, verbose, run_suite # When running as a script instead of within the regrtest framework, skip the # requires test, since it's obvious we want to run them. if __name__ <> '__main__': requires('bsddb') verbose = False if 'verbose' in sys.argv: verbose = True sys.argv.remove('verbose') if 'silent' in sys.argv: # take care of old flag, just in case verbose = False sys.argv.remove('silent') def suite(): test_modules = [ 'test_associate', 'test_basics', 'test_compat', 'test_dbobj', 'test_dbshelve', 'test_dbtables', 'test_env_close', 'test_get_none', 'test_join', 'test_lock', 'test_misc', 'test_queue', 'test_recno', 'test_thread', ] alltests = unittest.TestSuite() for name in test_modules: module = __import__("bsddb.test."+name, globals(), locals(), name) #print module,name alltests.addTest(module.test_suite()) return alltests # For invocation through regrtest def test_main(): tests = suite() run_suite(tests) # For invocation as a script if __name__ == '__main__': from bsddb import db print '-=' * 38 print db.DB_VERSION_STRING print 'bsddb.db.version(): %s' % (db.version(),) print 'bsddb.db.__version__: %s' % db.__version__ print 'bsddb.db.cvsid: %s' % db.cvsid print 'python version: %s' % sys.version print '-=' * 38 unittest.main(defaultTest='suite')
{ "content_hash": "5675cf1e26aaed9f6b5fdae254662bc6", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 77, "avg_line_length": 24.393939393939394, "alnum_prop": 0.5850931677018634, "repo_name": "MalloyPower/parsing-python", "id": "cd6ccc6053ccb467d6a54864a79e0c5aad59887a", "size": "1643", "binary": false, "copies": "17", "ref": "refs/heads/master", "path": "front-end/testsuite-python-lib/Python-2.3/Lib/test/test_bsddb3.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1963" }, { "name": "Lex", "bytes": "238458" }, { "name": "Makefile", "bytes": "4513" }, { "name": "OCaml", "bytes": "412695" }, { "name": "Python", "bytes": "17319" }, { "name": "Rascal", "bytes": "523063" }, { "name": "Yacc", "bytes": "429659" } ], "symlink_target": "" }
from __future__ import print_function
from builtins import range
#!/usr/bin/env python
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import os
import re

def remove_duplicates_preserve_order(items):
    found = set([])
    keep = []
    for item in items:
        if item not in found:
            found.add(item)
            keep.append(item)
    return keep

def check_dir(element, final_list):
    #replace all "\\ " with " " as os.path cannot check escaped spaces
    elementPath = re.subn(r'\\\s', ' ', element)[0]
    if os.path.isdir(elementPath):
        final_list.append(elementPath)

def parse_gtmroutines():
    var = os.getenv('gtmroutines')
    final_list = extract_m_source_dirs(var)
    final_str = ';'.join(final_list)
    print(final_str)

def extract_m_source_dirs(var):
    #First, replace unescaped spaces with semicolons
    tmp = var.replace(" ",";").replace("\;","\ ")
    tmpl = tmp.split(";")
    num_elements = len(tmpl)
    final_list = []
    for ind in range(num_elements):
        element = tmpl[ind]
        element = element.strip(")")
        paren_check = [m.start() for m in re.finditer("\(",element)]
        if not paren_check:
            check_dir(element, final_list)
        else:
            stripElement = element[paren_check[0]+1:]
            check_dir(stripElement, final_list)
    # Remove duplicates, and print the semicolon separated string
    final_list = remove_duplicates_preserve_order(final_list)
    return final_list

if __name__ == "__main__":
    parse_gtmroutines()
{ "content_hash": "f64ae707c4ed3acc04ef8c7256cf9880", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 76, "avg_line_length": 32.54545454545455, "alnum_prop": 0.6443202979515829, "repo_name": "josephsnyder/VistA", "id": "457219f8d4dfd94b8e86c6a8570abcae5525b62d", "size": "2148", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Testing/Python/ParseGTMRoutines.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "6315" }, { "name": "Brightscript", "bytes": "297" }, { "name": "CMake", "bytes": "120273" }, { "name": "CSS", "bytes": "132661" }, { "name": "Genshi", "bytes": "72951258" }, { "name": "HTML", "bytes": "2296661" }, { "name": "JavaScript", "bytes": "2341060" }, { "name": "M", "bytes": "483901" }, { "name": "PHP", "bytes": "6750" }, { "name": "Pascal", "bytes": "17825658" }, { "name": "Python", "bytes": "1473431" }, { "name": "Ruby", "bytes": "12147" }, { "name": "Shell", "bytes": "99067" } ], "symlink_target": "" }
""" Example of using ImageIndicator class. """ from ggame.mathapp import MathApp from ggame.indicator import ImageIndicator from ggame.inputpoint import InputImageButton from ggame.asset import Frame BUTTON = InputImageButton( "images/button-round.png", None, (40, 105), positioning="physical", frame=Frame(0, 0, 100, 100), qty=2, ) BUTTON.scale = 0.5 LIGHT = ImageIndicator( "images/red-led-off-on.png", (100, 100), BUTTON, # button object supplies the indicator state. positioning="physical", frame=Frame(0, 0, 600, 600), qty=2, ) LIGHT.scale = 0.1 MathApp().run()
{ "content_hash": "8d309543eb5123f8a6e075f81a5ff7a4", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 58, "avg_line_length": 20.666666666666668, "alnum_prop": 0.6806451612903226, "repo_name": "tiggerntatie/ggame", "id": "78de0e54909e18443c6363059723b2a3d496ce87", "size": "620", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/indicatorimageindicator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "203298" }, { "name": "Shell", "bytes": "1010" } ], "symlink_target": "" }
import sys, os # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.append(os.path.abspath('.')) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'flimp' copyright = u'2010, Fluidinfo Inc' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.6' # The full version, including alpha/beta/rc tags. release = '0.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['.build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/<name>. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'flimpdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ ('index', 'flimp.tex', ur'flimp Documentation', ur'Nicholas H.Tollervey', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
{ "content_hash": "e9160f1f6cc1126888a99036c6157ebb", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 81, "avg_line_length": 31.166666666666668, "alnum_prop": 0.717315139221833, "repo_name": "fluidinfo/flimp", "id": "17999a682f345e9af05cfa455e11716a922263db", "size": "6002", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "121" }, { "name": "Python", "bytes": "69182" } ], "symlink_target": "" }
import json
from urllib.request import urlopen

from bs4 import BeautifulSoup

from request_handler import RequestHandler


# Takes in a url and returns a soup
def get_soup(url):
    html = urlopen(url).read()
    return BeautifulSoup(html, 'html.parser')


# Takes a string and returns the substring after pre and before post
def strip_name(string, pre, post):
    return string.split(pre)[1].split(post)[0]


# Sending individual categories (documents) to database
def send_to_db(cat_name, info_object):
    # TODO: replace certain characters which are formatted incorrectly
    # info_object['name'] = info_object['name'].replace('&amp', '&')
    item = {}
    data = {}
    sub_data = {}
    item['message_type'] = 'write'
    item['collection'] = cat_name.replace('$', '').replace('-', '').replace('.', '').replace(' ', '').lower()
    item['data'] = info_object
    json_data = json.dumps(item, indent = 2)
    service = RequestHandler()
    service.handle_crawler(json_data)
{ "content_hash": "7eacc240877ec5a89b5c21f360430967", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 109, "avg_line_length": 29.78787878787879, "alnum_prop": 0.6683621566632757, "repo_name": "BenjaminLang/cpen_321", "id": "7553fafecb99e6e4f67e7cbf38a170545909cfa8", "size": "983", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Server/src/crawl_lib.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "21946" }, { "name": "HTML", "bytes": "26629" }, { "name": "Java", "bytes": "69234" }, { "name": "JavaScript", "bytes": "25107" }, { "name": "Python", "bytes": "41484" } ], "symlink_target": "" }
""" Django settings for file_upload project. Generated by 'django-admin startproject' using Django 1.8.1. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '=kynv=+o@776r4vhu808vn85jz5db4_upv1a!gf04^8!d80j@k' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'minimal', 'filefield', 'chunked', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'file_upload.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ 'minimal/templates/minimal', 'filefield/templates/filefield', 'chunked/templates/chunked', ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'file_upload.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' # Media files MEDIA_ROOT = '/data/uploads/' # Logging LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' }, }, 'handlers': { 'console':{ 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' }, }, 'loggers': { 'django': { 'handlers': ['console'], 'propagate': True, 'level': 'DEBUG', }, 'django.request': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, } }
{ "content_hash": "3b15849efc1e7c763b7b5cd94c13b39b", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 95, "avg_line_length": 25.04635761589404, "alnum_prop": 0.6181914331041777, "repo_name": "grschafer/django-meetup-fileupload", "id": "9bf261817735da80e95c1cb1d739e1916c8f86a0", "size": "3806", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "file_upload/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "401" }, { "name": "Python", "bytes": "16053" } ], "symlink_target": "" }
import tensorflow as tf

filename_queue = tf.train.string_input_producer([
    "hdfs://hdfs:9000/hdfs/file1.csv",
    "hdfs://hdfs:9000/hdfs/file2.csv",
])

reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[1], [1], [1], [1], [1]]
col1, col2, col3, col4, col5 = tf.decode_csv(
    value, record_defaults=record_defaults)
features = tf.stack([col1, col2, col3, col4])

with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for i in range(1200):
        # Retrieve a single instance:
        example, label = sess.run([features, col5])
        print(example, label)

    coord.request_stop()
    coord.join(threads)
{ "content_hash": "539e87e88a3d39efba505c359b3cb66a", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 74, "avg_line_length": 28.689655172413794, "alnum_prop": 0.6935096153846154, "repo_name": "Resly/pipeline", "id": "8165d6252957c9ba7fac5321b9ea62bb6c48345c", "size": "832", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "clustered.ml/tensorflow/src/hdfs-test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "36325" }, { "name": "Batchfile", "bytes": "21218" }, { "name": "C", "bytes": "1759" }, { "name": "C++", "bytes": "50538" }, { "name": "CSS", "bytes": "441446" }, { "name": "Cuda", "bytes": "3113" }, { "name": "Go", "bytes": "9555" }, { "name": "HTML", "bytes": "48376774" }, { "name": "Java", "bytes": "108962" }, { "name": "JavaScript", "bytes": "539670" }, { "name": "Jupyter Notebook", "bytes": "18176491" }, { "name": "Makefile", "bytes": "357" }, { "name": "Protocol Buffer", "bytes": "137774" }, { "name": "Python", "bytes": "667334" }, { "name": "Scala", "bytes": "366964" }, { "name": "Shell", "bytes": "110692" }, { "name": "XSLT", "bytes": "26188" } ], "symlink_target": "" }
import json import math import numpy as np import pandas as pd from sklearn.metrics import mean_squared_error, mean_absolute_error import google.datalab.bigquery as bq from . import _util class Metrics(object): """Represents a Metrics object that computes metrics from raw evaluation results.""" def __init__(self, input_csv_pattern=None, headers=None, bigquery=None): """ Args: input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path. headers: Csv headers. Required if input_csv_pattern is not None. bigquery: Can be one of: A BigQuery query string. A Bigquery table string. A Query object defined with '%%bq query --name [query_name]'. Raises: ValueError if input_csv_pattern is provided but both headers and schema_file are None. ValueError if but both input_csv_pattern and bigquery are None. """ self._input_csv_files = None self._bigquery = None if input_csv_pattern: self._input_csv_files = _util.glob_files(input_csv_pattern) if not headers: raise ValueError('csv requires headers.') self._headers = headers elif bigquery: self._bigquery = bigquery else: raise ValueError('Either input_csv_pattern or bigquery needs to be provided.') @staticmethod def from_csv(input_csv_pattern, headers=None, schema_file=None): """Create a Metrics instance from csv file pattern. Args: input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path. headers: Csv headers. schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None. Returns: a Metrics instance. Raises: ValueError if both headers and schema_file are None. """ if headers is not None: names = headers elif schema_file is not None: with _util.open_local_or_gcs(schema_file, mode='r') as f: schema = json.load(f) names = [x['name'] for x in schema] else: raise ValueError('Either headers or schema_file is needed') metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names) return metrics @staticmethod def from_bigquery(sql): """Create a Metrics instance from a bigquery query or table. Returns: a Metrics instance. Args: sql: A BigQuery table name or a query. """ if isinstance(sql, bq.Query): sql = sql._expanded_sql() parts = sql.split('.') if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts): sql = '(' + sql + ')' # query, not a table name else: sql = '`' + sql + '`' # table name metrics = Metrics(bigquery=sql) return metrics def _get_data_from_csv_files(self): """Get data from input csv files.""" all_df = [] for file_name in self._input_csv_files: with _util.open_local_or_gcs(file_name, mode='r') as f: all_df.append(pd.read_csv(f, names=self._headers)) df = pd.concat(all_df, ignore_index=True) return df def _get_data_from_bigquery(self, queries): """Get data from bigquery table or query.""" all_df = [] for query in queries: all_df.append(query.execute().result().to_dataframe()) df = pd.concat(all_df, ignore_index=True) return df def accuracy(self): """Get accuracy numbers for each target and overall. Returns: A DataFrame with two columns: 'class' and 'accuracy'. It also contains the overall accuracy with class being '_all'. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column. 
""" if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') labels = sorted(set(df['target']) | set(df['predicted'])) accuracy_results = [] for label in labels: correct_count = len(df[(df['target'] == df['predicted']) & (df['target'] == label)]) total_count = len(df[(df['target'] == label)]) accuracy_results.append({ 'target': label, 'accuracy': float(correct_count) / total_count if total_count > 0 else 0, 'count': total_count }) total_correct_count = len(df[(df['target'] == df['predicted'])]) if len(df) > 0: total_accuracy = float(total_correct_count) / len(df) accuracy_results.append({'target': '_all', 'accuracy': total_accuracy, 'count': len(df)}) return pd.DataFrame(accuracy_results) elif self._bigquery: query = bq.Query(""" SELECT target, SUM(CASE WHEN target=predicted THEN 1 ELSE 0 END)/COUNT(*) as accuracy, COUNT(*) as count FROM %s GROUP BY target""" % self._bigquery) query_all = bq.Query(""" SELECT "_all" as target, SUM(CASE WHEN target=predicted THEN 1 ELSE 0 END)/COUNT(*) as accuracy, COUNT(*) as count FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query, query_all]) return df def roc(self, num_thresholds, target_class, probability_column=None): """Get true positive rate, false positive rate values from evaluation results. Args: num_thresholds: an integer. Number of thresholds. target_class: a string indciating the target class, i.e. "daisy" in flower classification. probability_column: the name of the probability column. If None, defaults to value of target_class. Returns: A DataFrame with columns: 'tpr', 'fpr', 'threshold' with number of rows equal to num_thresholds. Raises: Exception if the CSV headers do not include 'target' or probability_column, or BigQuery does not return 'target' or probability_column column. """ if not probability_column: probability_column = target_class thresholds = np.linspace(0, 1, num_thresholds + 1) if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or probability_column not in df: raise ValueError('Cannot find "target" or "%s" column' % probability_column) total_positive = sum(1 for x in df['target'] if x == target_class) total_negative = len(df) - total_positive true_positives, false_positives = [], [] for threshold in thresholds: true_positive_count = len(df[(df[probability_column] > threshold) & (df['target'] == target_class)]) false_positive_count = len(df[(df[probability_column] > threshold) & (df['target'] != target_class)]) true_positives.append(true_positive_count) false_positives.append(false_positive_count) data = [] for tp, fp, t in zip(true_positives, false_positives, thresholds): tpr = (float)(tp) / total_positive if total_positive > 0. else 0. fpr = (float)(fp) / total_negative if total_negative > 0. else 0. 
data.append({'tpr': tpr, 'fpr': fpr, 'threshold': t}) return pd.DataFrame(data) elif self._bigquery: true_positive_query = bq.Query(""" SELECT COUNT(*) as true_positive FROM %s CROSS JOIN (SELECT * FROM UNNEST ([%s]) as t) WHERE %s > t AND target = '%s' GROUP BY t ORDER BY t """ % (self._bigquery, ','.join(map(str, thresholds)), probability_column, target_class)) false_positive_query = bq.Query(""" SELECT COUNT(*) as false_positive FROM %s CROSS JOIN (SELECT * FROM UNNEST ([%s]) as t) WHERE %s > t AND target != '%s' GROUP BY t ORDER BY t """ % (self._bigquery, ','.join(map(str, thresholds)), probability_column, target_class)) total_positive_query = bq.Query(""" SELECT COUNT(*) as total_positive FROM %s WHERE target = '%s' """ % (self._bigquery, target_class)) total_negative_query = bq.Query(""" SELECT COUNT(*) as total_negative FROM %s WHERE target != '%s' """ % (self._bigquery, target_class)) true_positives = true_positive_query.execute().result() false_positives = false_positive_query.execute().result() total_positive = total_positive_query.execute().result()[0]['total_positive'] total_negative = total_negative_query.execute().result()[0]['total_negative'] data = [] for tp, fp, t in zip(true_positives, false_positives, thresholds): tpr = (float)(tp['true_positive']) / total_positive if total_positive > 0. else 0. fpr = (float)(fp['false_positive']) / total_negative if total_negative > 0. else 0. data.append({'tpr': tpr, 'fpr': fpr, 'threshold': t}) data.append({'tpr': 0., 'fpr': 0., 'threshold': 1.0}) return pd.DataFrame(data) def precision_recall(self, num_thresholds, target_class, probability_column=None): """Get precision, recall values from evaluation results. Args: num_thresholds: an integer. Number of thresholds. target_class: a string indciating the target class, i.e. "daisy" in flower classification. probability_column: the name of the probability column. If None, defaults to value of target_class. Returns: A DataFrame with columns: 'threshold', 'precision', 'recall' with number of rows equal to num_thresholds. Raises: Exception if the CSV headers do not include 'target' or probability_column, or BigQuery does not return 'target' or probability_column column. """ if not probability_column: probability_column = target_class # threshold = 1.0 is excluded. thresholds = np.linspace(0, 1, num_thresholds + 1)[0:-1] if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or probability_column not in df: raise ValueError('Cannot find "target" or "%s" column' % probability_column) total_target = sum(1 for x in df['target'] if x == target_class) total_predicted = [] correct_predicted = [] for threshold in thresholds: predicted_count = sum(1 for x in df[probability_column] if x > threshold) total_predicted.append(predicted_count) correct_count = len(df[(df[probability_column] > threshold) & (df['target'] == target_class)]) correct_predicted.append(correct_count) data = [] for p, c, t in zip(total_predicted, correct_predicted, thresholds): precision = (float)(c) / p if p > 0. else 0. recall = (float)(c) / total_target if total_target > 0. else 0. 
data.append({'precision': precision, 'recall': recall, 'threshold': t}) return pd.DataFrame(data) elif self._bigquery: total_predicted_query = bq.Query(""" SELECT COUNT(*) as total_predicted FROM %s CROSS JOIN (SELECT * FROM UNNEST ([%s]) as t) WHERE %s > t GROUP BY t ORDER BY t """ % (self._bigquery, ','.join(map(str, thresholds)), probability_column)) correct_predicted_query = bq.Query(""" SELECT COUNT(*) as correct_predicted FROM %s CROSS JOIN (SELECT * FROM UNNEST ([%s]) as t) WHERE %s > t AND target='%s' GROUP BY t ORDER BY t """ % (self._bigquery, ','.join(map(str, thresholds)), probability_column, target_class)) total_target_query = bq.Query(""" SELECT COUNT(*) as total_target FROM %s WHERE target='%s' """ % (self._bigquery, target_class)) total_predicted = total_predicted_query.execute().result() correct_predicted = correct_predicted_query.execute().result() total_target = total_target_query.execute().result()[0]['total_target'] data = [] for p, c, t in zip(total_predicted, correct_predicted, thresholds): precision = ((float)(c['correct_predicted']) / p['total_predicted'] if p['total_predicted'] > 0. else 0.) recall = (float)(c['correct_predicted']) / total_target if total_target > 0. else 0. data.append({'precision': precision, 'recall': recall, 'threshold': t}) return pd.DataFrame(data) def rmse(self): """Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. """ if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) # if df is empty or contains non-numeric, scikit learn will raise error. mse = mean_squared_error(df['target'], df['predicted']) return math.sqrt(mse) elif self._bigquery: query = bq.Query(""" SELECT SQRT(SUM(ABS(predicted-target) * ABS(predicted-target)) / COUNT(*)) as rmse FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['rmse'][0] def mae(self): """Get MAE (Mean Absolute Error) for regression model evaluation results. Returns: the MAE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. """ if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) mae = mean_absolute_error(df['target'], df['predicted']) return mae elif self._bigquery: query = bq.Query(""" SELECT SUM(ABS(predicted-target)) / COUNT(*) as mae FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['mae'][0] def percentile_nearest(self, percentile): """Get nearest percentile from regression model evaluation results. Args: percentile: a 0~100 float number. Returns: the percentile float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. 
""" if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) abs_errors = np.array((df['target'] - df['predicted']).apply(abs)) return np.percentile(abs_errors, percentile, interpolation='nearest') elif self._bigquery: query = bq.Query(""" SELECT PERCENTILE_DISC(ABS(predicted-target), %f) OVER() AS percentile FROM %s LIMIT 1""" % (float(percentile) / 100, self._bigquery)) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['percentile'][0]
{ "content_hash": "a0f0b1f36593af9a59f873a24158780b", "timestamp": "", "source": "github", "line_count": 457, "max_line_length": 97, "avg_line_length": 34.49234135667396, "alnum_prop": 0.6092114445219818, "repo_name": "googledatalab/pydatalab", "id": "0d93ec439896d95a4b1de06e1fe25d184cbd4413", "size": "16353", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "google/datalab/ml/_metrics.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "7596" }, { "name": "Python", "bytes": "2424850" }, { "name": "Shell", "bytes": "4312" }, { "name": "TypeScript", "bytes": "105381" } ], "symlink_target": "" }
from unit_test_common import execute_csv2_command, initialize_csv2_request, ut_id, sanity_commands, parameters_commands from sys import argv # lno: UV - error code identifier. def main(gvar): if not gvar: gvar = {} if len(argv) > 1: initialize_csv2_request(gvar, selections=argv[1]) else: initialize_csv2_request(gvar) # 01 - 14 sanity_commands(gvar, 'user', 'update') # 15 execute_csv2_command( gvar, 1, None, 'the following mandatory parameters must be specified on the command line', ['user', 'update'] ) parameters = { # 16 Omit `--username`. # We cannot specify test cases for `--username` here because it would complain that we did not specify any fields to update. '--username': {'valid': ut_id(gvar, 'clu7'), 'test_cases': {}, 'mandatory': True}, '--user-password': {'valid': gvar['user_secret'], 'test_cases': { # 17 '': 'value specified for a password is less than 6 characters.', # 18 'inv': 'value specified for a password is less than 6 characters.', # 19 'invalid': 'value specified for a password is less then 16 characters, and does not contain a mixture of upper, lower, and numerics.' }}, # 20 '--super-user': {'valid': 0, 'test_cases': {'invalid-unit-test': 'boolean value specified for "is_superuser" must be one of the following: true, false, yes, no, 1, or 0.'}}, # 21 '--group-option': {'valid': 'add', 'test_cases': {'invalid-unit-test': 'value specified for "group_option" must be one of the following options: [\'add\', \'delete\'].'}} } parameters_commands(gvar, 'user', 'update', ut_id(gvar, 'clg1'), ut_id(gvar, 'clu4'), parameters) # 22 Specify a user that does not exist. execute_csv2_command( gvar, 1, 'UV', 'the request did not match any rows.', ['user', 'update', '-un', 'invalid-unit-test', '-upw', gvar['user_secret'], '-g', ut_id(gvar, 'clg1')] ) # 23 execute_csv2_command( gvar, 1, None, '"cloudscheduler user update" requires at least one option to update.', ['user', 'update', '-un', ut_id(gvar, 'clu7')] ) # 24 execute_csv2_command( gvar, 1, None, 'The following command line arguments were invalid: only-keys', ['user', 'update', '-ok'] ) # 25 execute_csv2_command( gvar, 1, 'UV', 'common name "{0}" conflicts with registered user "{0}".'.format(ut_id(gvar, 'clu3')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-ucn', ut_id(gvar, 'clu3')] ) # 26 execute_csv2_command( gvar, 1, 'UV', 'common name "{}" conflicts with registered user "{}".'.format(ut_id(gvar, 'command-line-user-3'), ut_id(gvar, 'clu3')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-ucn', ut_id(gvar, 'command-line-user-3')] ) # 27 We have to specify server and server password explicitly because `--super-user` has to be the last parameter. 
execute_csv2_command( gvar, 1, None, 'Value omitted for option: -SU | --super-user', ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-s', 'unit-test', '-spw', gvar['user_secret'], '--super-user'] ) # 28 execute_csv2_command( gvar, 1, 'UV', 'user update must specify at least one field to update.', ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-go', 'add'] ) # 29 execute_csv2_command( gvar, 1, None, 'user update, parameter "group_name" contains an empty string which is specifically disallowed.', ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ''] ) # 30 execute_csv2_command( gvar, 1, 'UV', 'specified group "invalid-unit-test" does not exist.', ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', 'invalid-unit-test'] ) # 31 execute_csv2_command( gvar, 1, 'UV', 'user update, "{}" failed - group "{}" was specified twice.'.format(ut_id(gvar, 'clu7'), ut_id(gvar, 'clg1')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1,clg1')] ) # 32 execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-ucn', ''] ) # 33 execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-ucn', ut_id(gvar, 'command-line-user-update'), '-SU', 'yes'] ) # 34 Implicitly add clu7 to clg1. execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1')] ) # 35 Explicitly delete clu7 from clg1. execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1'), '-go', 'delete'] ) # 36 Explicitly add clu7 to clg1. execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1'), '-go', 'add'] ) # 37 Explicitly delete clu7 from clg1 and clg3. execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1,clg3'), '-go', 'delete'] ) # 38 Implicitly add clu7 to clg1 and clg3. execute_csv2_command( gvar, 0, None, 'user "{}" successfully updated.'.format(ut_id(gvar, 'clu7')), ['user', 'update', '-un', ut_id(gvar, 'clu7'), '-gn', ut_id(gvar, 'clg1,clg3')] ) if __name__ == "__main__": main(None)
{ "content_hash": "0fa1e64d28b7799e1b1d9baf90f227a3", "timestamp": "", "source": "github", "line_count": 146, "max_line_length": 181, "avg_line_length": 40.678082191780824, "alnum_prop": 0.5633945108604143, "repo_name": "hep-gc/cloudscheduler", "id": "72260d03aae52dbdec3aec3e31cbc3fbe9a2f079", "size": "5939", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "unit_tests/test_cli_user_update.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "71824" }, { "name": "Gherkin", "bytes": "1017" }, { "name": "HTML", "bytes": "362015" }, { "name": "JavaScript", "bytes": "144210" }, { "name": "Jinja", "bytes": "51122" }, { "name": "Python", "bytes": "2725635" }, { "name": "Roff", "bytes": "189652" }, { "name": "Shell", "bytes": "33321" } ], "symlink_target": "" }
"""generate_gradient.py ~~~~~~~~~~~~~~~~~~~~~~~ Use network2 to figure out the average starting values of the gradient error terms \delta^l_j = \partial C / \partial z^l_j = \partial C / \partial b^l_j. """ #### Libraries # Standard library import json import math import random import shutil import sys sys.path.append("../src/") # My library import mnist_loader import network2 # Third-party libraries import matplotlib.pyplot as plt import numpy as np def main(): # Load the data full_td, _, _ = mnist_loader.load_data_wrapper() td = full_td[:1000] # Just use the first 1000 items of training data epochs = 500 # Number of epochs to train for print "\nTwo hidden layers:" net = network2.Network([784, 30, 30, 10]) initial_norms(td, net) abbreviated_gradient = [ ag[:6] for ag in get_average_gradient(net, td)[:-1]] print "Saving the averaged gradient for the top six neurons in each "+\ "layer.\nWARNING: This will affect the look of the book, so be "+\ "sure to check the\nrelevant material (early chapter 5)." f = open("initial_gradient.json", "w") json.dump(abbreviated_gradient, f) f.close() shutil.copy("initial_gradient.json", "../../js/initial_gradient.json") training(td, net, epochs, "norms_during_training_2_layers.json") plot_training( epochs, "norms_during_training_2_layers.json", 2) print "\nThree hidden layers:" net = network2.Network([784, 30, 30, 30, 10]) initial_norms(td, net) training(td, net, epochs, "norms_during_training_3_layers.json") plot_training( epochs, "norms_during_training_3_layers.json", 3) print "\nFour hidden layers:" net = network2.Network([784, 30, 30, 30, 30, 10]) initial_norms(td, net) training(td, net, epochs, "norms_during_training_4_layers.json") plot_training( epochs, "norms_during_training_4_layers.json", 4) def initial_norms(training_data, net): average_gradient = get_average_gradient(net, training_data) norms = [list_norm(avg) for avg in average_gradient[:-1]] print "Average gradient for the hidden layers: "+str(norms) def training(training_data, net, epochs, filename): norms = [] for j in range(epochs): average_gradient = get_average_gradient(net, training_data) norms.append([list_norm(avg) for avg in average_gradient[:-1]]) print "Epoch: %s" % j net.SGD(training_data, 1, 1000, 0.1, lmbda=5.0) f = open(filename, "w") json.dump(norms, f) f.close() def plot_training(epochs, filename, num_layers): f = open(filename, "r") norms = json.load(f) f.close() fig = plt.figure() ax = fig.add_subplot(111) colors = ["#2A6EA6", "#FFA933", "#FF5555", "#55FF55", "#5555FF"] for j in range(num_layers): ax.plot(np.arange(epochs), [n[j] for n in norms], color=colors[j], label="Hidden layer %s" % (j+1,)) ax.set_xlim([0, epochs]) ax.grid(True) ax.set_xlabel('Number of epochs of training') ax.set_title('Speed of learning: %s hidden layers' % num_layers) ax.set_yscale('log') plt.legend(loc="upper right") fig_filename = "training_speed_%s_layers.png" % num_layers plt.savefig(fig_filename) shutil.copy(fig_filename, "../../images/"+fig_filename) plt.show() def get_average_gradient(net, training_data): nabla_b_results = [net.backprop(x, y)[0] for x, y in training_data] gradient = list_sum(nabla_b_results) return [(np.reshape(g, len(g))/len(training_data)).tolist() for g in gradient] def zip_sum(a, b): return [x+y for (x, y) in zip(a, b)] def list_sum(l): return reduce(zip_sum, l) def list_norm(l): return math.sqrt(sum([x*x for x in l])) if __name__ == "__main__": main()
{ "content_hash": "9fc760e02725d1d7d6a11ef3997552c8", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 75, "avg_line_length": 32.33613445378151, "alnum_prop": 0.6273388773388774, "repo_name": "seanpquig/study-group", "id": "296aa95c918bfa5e0508836afcbd400eb23be435", "size": "3848", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "neural-networks-and-deep-learning/fig/generate_gradient.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "488974" }, { "name": "Python", "bytes": "140729" } ], "symlink_target": "" }
from numpy import * import matplotlib.pyplot as plt from csv import DictReader def getData(path): out = [] reader = DictReader(open("./DATA/Video_Games_Sales_as_at_22_Dec_2016.csv", "rt", encoding="utf-8")) for row in reader: out.append(row) #print(row) return out def joinData(rawDATA): out = dict() for each in rawDATA: if each["Name"] in out.keys(): temp = out[each["Name"]] temp["NA_Sales"] = temp["NA_Sales"] + float(each["NA_Sales"]) temp["EU_Sales"] = temp["EU_Sales"] + float(each["EU_Sales"]) temp["JP_Sales"] = temp["JP_Sales"] + float(each["JP_Sales"]) temp["Global_Sales"] = temp["Global_Sales"] + float(each["Global_Sales"]) temp["Platform"].append(each["Platform"]) out[each["Name"]] = temp else: each["Platform"] = [each["Platform"]] each["NA_Sales"] = float(each["NA_Sales"]) each["EU_Sales"] = float(each["EU_Sales"]) each["JP_Sales"] = float(each["JP_Sales"]) each["Global_Sales"] = float(each["Global_Sales"]) out[each["Name"]] = each rawDATA = [] for each in out: temp = out[each] temp["Name"] = each #temp["NA_Sales"] = float(temp["NA_Sales"]) #temp["EU_Sales"] = float(temp["EU_Sales"]) #temp["JP_Sales"] = float(temp["JP_Sales"]) #temp["Global_Sales"] = float(temp["Global_Sales"]) rawDATA.append(temp) return rawDATA def top10(rawDATA, type): param = 0 if type == "NA": print("\nTop 10 prodanih NA All-time:") param = 3 elif type == "EU": print("\nTop 10 prodanih EU All-time:") param = 4 elif type == "JP": print("\nTop 10 prodanih JP All-time:") param = 5 elif type == "Global": print("\nTop 10 prodanih po svetu All-time:") param = 6 games = [] for row in rawDATA: games.append((row["Name"], row["Year_of_Release"], row["Genre"], row["NA_Sales"], row["EU_Sales"], row["JP_Sales"], row["Global_Sales"], row["Critic_Score"], row["Critic_Count"], row["User_Score"], row["User_Count"])) games = sorted(games, key=lambda x: (x[param]), reverse=True) print("| Ime | Prodanih izvodov v milijonih |\n| ------------------------ | ---------------------------- |") for each in games[:10]: print( "| " + each[0] + " | " + str(each[param]) + " |") return def genresSales(rawDATA, type): param = 0 if type == "NA": plt.title("Najbolje prodajani žanri v NA") param = 0 elif type == "EU": plt.title("Najbolje prodajani žanri v EU") param = 1 elif type == "JP": plt.title("Najbolje prodajani žanri v JP") param = 2 elif type == "Global": plt.title("Najbolje prodajani žanri po svetu") param = 3 genresSales = dict() for row in rawDATA: if row["Genre"] not in genresSales: genresSales[row["Genre"]] = [row["NA_Sales"], row["EU_Sales"], row["JP_Sales"], row["Global_Sales"]] else: genresSales[row["Genre"]][0] = genresSales[row["Genre"]][0] + row["NA_Sales"] genresSales[row["Genre"]][1] = genresSales[row["Genre"]][1] + row["EU_Sales"] genresSales[row["Genre"]][2] = genresSales[row["Genre"]][2] + row["JP_Sales"] genresSales[row["Genre"]][3] = genresSales[row["Genre"]][3] + row["Global_Sales"] data = [] for key in genresSales.keys(): data.append((key, genresSales[key])) countSales = sorted(data, key=lambda x: (x[1][param]), reverse=True) ganres = [] ganresLabels = [] for each in countSales: ganresLabels.append(each[0]) ganres.append(each[1][param]) pos = arange(len(ganresLabels)) ax = plt.axes() ax.set_xticks(pos) ax.tick_params(labelsize=25) ax.set_xticklabels(ganresLabels) plt.xticks(rotation=65) plt.bar(pos, ganres, 1) plt.tight_layout() plt.figure(num=None, figsize=(15, 10), dpi=80) return def yearSales(rawDATA): yearSales = dict() for row in rawDATA: try: if float(row["Year_of_Release"]) not in yearSales.keys(): 
yearSales[float(row["Year_of_Release"])] = [row["NA_Sales"], row["EU_Sales"], row["JP_Sales"], row["Global_Sales"]] else: yearSales[float(row["Year_of_Release"])][0] = yearSales[float(row["Year_of_Release"])][0] + row["NA_Sales"] yearSales[float(row["Year_of_Release"])][1] = yearSales[float(row["Year_of_Release"])][1] + row["EU_Sales"] yearSales[float(row["Year_of_Release"])][2] = yearSales[float(row["Year_of_Release"])][2] + row["JP_Sales"] yearSales[float(row["Year_of_Release"])][3] = yearSales[float(row["Year_of_Release"])][3] + row["Global_Sales"] except: continue data = [] for each in yearSales.keys(): data.append((each, yearSales[each])) yearSales = sorted(data, key=lambda x: (x[0]), reverse=False) yearSalesLabels = [] yearSalesVals = [] for each in yearSales: yearSalesLabels.append(str(int(each[0]))) yearSalesVals.append(int(each[1][3])) yearSalesLabels.pop() yearSalesVals.pop() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=65) ax.tick_params(labelsize=17) plt.title("Svetovna prodaja skozi leta (v milijonih)") plt.bar(pos, yearSalesVals, 1) plt.show() return def yearSalesSplit(rawDATA): sales = dict() for each in rawDATA: if each["Year_of_Release"] in sales: price = sales[each["Year_of_Release"]] if "PC" == each["Platform"]: price[0] += float(each["NA_Sales"]) elif "PS" == each["Platform"] or "PS2" == each["Platform"] or "PS3" == each["Platform"] or "PS4" == each[ "Platform"]: price[1] += float(each["NA_Sales"]) elif "X360" == each["Platform"] or "XOne" == each["Platform"] or "XB" == each["Platform"]: price[2] += float(each["NA_Sales"]) else: price[3] += float(each["NA_Sales"]) sales[each["Year_of_Release"]] = price else: # pc, ps, xbox, other price = [0, 0, 0, 0] if "PC" == each["Platform"]: price[0] = float(each["NA_Sales"]) elif "PS" == each["Platform"] or "PS2" == each["Platform"] or "PS3" == each["Platform"] or "PS4" == each[ "Platform"]: price[1] = float(each["NA_Sales"]) elif "X360" == each["Platform"] or "XOne" == each["Platform"] or "XB" == each["Platform"]: price[2] = float(each["NA_Sales"]) else: price[3] = float(each["NA_Sales"]) sales[each["Year_of_Release"]] = price data = [] for each in sales: if each == "N/A" or int(each) < 1994: continue data.append((int(each), sales[each])) yearSales = sorted(data, key=lambda x: (x[0]), reverse=False) yearSalesLabels = [] yearSalesVals = [[], [], [], []] for each in yearSales: yearSalesLabels.append(str(each[0])) yearSalesVals[0].append(each[1][0]) yearSalesVals[1].append(each[1][1]) yearSalesVals[2].append(each[1][2]) yearSalesVals[3].append(each[1][3]) yearSalesLabels.pop() yearSalesVals[0].pop() yearSalesVals[1].pop() yearSalesVals[2].pop() yearSalesVals[3].pop() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=90) plt.bar(pos - 0.3, yearSalesVals[0], width=0.3, color='r', align='center') plt.bar(pos, yearSalesVals[1], width=0.3, color='b', align='center') plt.bar(pos + 0.3, yearSalesVals[2], width=0.3, color='g', align='center') plt.legend(["PC", "PS", "XB"]) plt.title("Prodaje po letih") plt.tight_layout() plt.show() return def genresByYears(joinedDATA): genres = dict() for each in joinedDATA: if each["Genre"] in genres: if each["Year_of_Release"] in genres[each["Genre"]]: genres[each["Genre"]][each["Year_of_Release"]] += each["NA_Sales"] else: genres[each["Genre"]][each["Year_of_Release"]] = each["NA_Sales"] else: genres[each["Genre"]] = dict() 
genres[each["Genre"]][each["Year_of_Release"]] = each["NA_Sales"] data = [] labels = [] for each in genres: temp = [] for a in genres[each]: temp.append((a, genres[each][a])) data.append(temp) labels.append(each) for i in range(len(data)): data[i].pop() data[i] = sorted(data[i], key=lambda x: (x[0]), reverse=False) yearSalesLabels = [] for each in range(1980, 2017): yearSalesLabels.append(each) data2 = [] for i in range(len(data)): data2.append([]) index = 0 for j in range(1980, 2017): isIn = False for year, num in data[i]: if year != "N/A" and int(year) == j: data2[i].append(float(num)) isIn = True break if not isIn: data2[i].append(0) labels.pop() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=90) plt.bar(pos - 0.3, data2[0], width=0.3, color='r', align='center') plt.bar(pos, data2[1], width=0.3, color='b', align='center') plt.bar(pos + 0.3, data2[2], width=0.3, color='g', align='center') plt.legend(labels[:3]) plt.title("Žanri po letih") plt.tight_layout() plt.show() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=90) plt.bar(pos - 0.3, data2[3], width=0.3, color='r', align='center') plt.bar(pos, data2[4], width=0.3, color='b', align='center') plt.bar(pos + 0.3, data2[5], width=0.3, color='g', align='center') plt.legend(labels[3:7]) plt.title("Žanri po letih") plt.tight_layout() plt.show() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=90) plt.bar(pos - 0.3, data2[6], width=0.3, color='r', align='center') plt.bar(pos, data2[7], width=0.3, color='b', align='center') plt.bar(pos + 0.3, data2[8], width=0.3, color='g', align='center') plt.legend(labels[6:11]) plt.title("Žanri po letih") plt.tight_layout() plt.show() pos = arange(len(yearSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(yearSalesLabels) plt.xticks(rotation=90) plt.bar(pos - 0.3, data2[9], width=0.3, color='r', align='center') plt.bar(pos, data2[10], width=0.3, color='b', align='center') plt.bar(pos + 0.3, data2[11], width=0.3, color='g', align='center') plt.legend(labels[9:]) plt.title("Žanri po letih") plt.tight_layout() plt.show() return def bestDevelopers(rawDATA): developers = dict() for row in rawDATA: try: if row["Developer"] == "": continue; if row["Developer"] not in developers.keys(): developers[row["Developer"]] = [row["Global_Sales"], (int(row["Critic_Score"]))] else: developers[row["Developer"]][0] = developers[row["Developer"]][0] + row["Global_Sales"] developers[row["Developer"]][1].append(int(row["Critic_Score"])) except: continue print("| ------------------------ | ---------------------------- |") data = [] for key in developers.keys(): developers[key][0] = mean(developers[key][0]) data.append((key, developers[key])) developers = sorted(data, key=lambda x: (x[1][0]), reverse=True) top10devsSales = [] top10devsSalesLabels = [] for each in developers[:10]: top10devsSales.append(float(each[1][0])) top10devsSalesLabels.append(each[0]) pos = arange(len(top10devsSalesLabels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(top10devsSalesLabels) ax.tick_params(labelsize=13) plt.xticks(rotation=65) plt.title("Najboljši developerji po svetovnih prodajah") plt.bar(pos, top10devsSales, 1) plt.tight_layout() plt.figure(num=None, figsize=(15, 10), dpi=80) plt.show() developers = sorted(data, key=lambda x: (x[1][1]), reverse=True) print("\nNajboljši developerji po 
povprečni oceni kritikov:") print("| Ime | Ocena |") print("| ------------------------ | ---------------------------- |") for each in developers[:10]: print( "| " + each[0] + " | " + str(each[1][1]) + " |") return def esrb(joinedDATA): ratings = dict() for each in joinedDATA: if each["Rating"] not in ratings: ratings[each["Rating"]] = each["NA_Sales"]; else: ratings[each["Rating"]] += each["NA_Sales"]; labels = [] data = [] for each in ratings: if each == "": labels.append("No Rating") else: labels.append(each) data.append(ratings[each]) pos = arange(len(labels)) ax = plt.axes() ax.set_xticks(pos) ax.set_xticklabels(labels) plt.xticks(rotation=90) plt.bar(pos, data, width=0.3, color='b', align='center') plt.title("Prodaje glede na ESRB oceno") plt.tight_layout() plt.show() return #MAIN----------------------------------------------------------------------------------------- rawDATA = getData("./DATA/Video_Games_Sales_as_at_22_Dec_2016.csv") areas = ["NA", "EU", "JP", "Global"] rawDATA = joinData(rawDATA) #TOP 10 Prodanih #--------------------- for i in areas: top10(rawDATA, i) #Prodaje po žanrih #--------------------- for i in areas: genresSales(rawDATA, i) #Prodaje po letih #--------------------- yearSales(rawDATA) yearSalesSplit( getData("./DATA/Video_Games_Sales_as_at_22_Dec_2016.csv")) genresByYears(rawDATA) #Najboljši developerji #---------------------- #bestDevelopers(rawDATA) #Prodaje glede na ESRB ocene #---------------------- esrb(rawDATA) exit(0)
{ "content_hash": "0000dad3bad6cd3410f8fdb6c7215de2", "timestamp": "", "source": "github", "line_count": 481, "max_line_length": 133, "avg_line_length": 30.673596673596673, "alnum_prop": 0.5410736071573811, "repo_name": "klemenStanic/PR17KSLK", "id": "c10603c7f3f266c9a3ecd6b674b5b47fb60f0e24", "size": "14767", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "projekt.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "14767" } ], "symlink_target": "" }
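# The four bar-chart sections in the script above repeat the same axes setup and
# differ only in which slice of genres is drawn. A minimal refactoring sketch
# (plot_genre_group and its arguments are illustrative names, not part of the
# original script; assumes matplotlib and numpy are available):
import numpy as np
import matplotlib.pyplot as plt

def plot_genre_group(year_labels, series, labels, title="Žanri po letih"):
    """Draw up to three genres as grouped bars over the given years."""
    pos = np.arange(len(year_labels))
    fig, ax = plt.subplots()
    ax.set_xticks(pos)
    ax.set_xticklabels(year_labels, rotation=90)
    offsets = (-0.3, 0.0, 0.3)
    colors = ('r', 'b', 'g')
    for offset, color, values in zip(offsets, colors, series):
        ax.bar(pos + offset, values, width=0.3, color=color, align='center')
    ax.legend(labels[:len(series)])
    ax.set_title(title)
    fig.tight_layout()
    plt.show()

# Usage mirroring the first of the four blocks above:
# plot_genre_group(yearSalesLabels, data2[0:3], labels[0:3])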
from __future__ import print_function
from six.moves import range
import numpy as np

__all__ = [
    'predict_RAM_usage',
    'convert_to_num',
    'convert_to_bin'
]


def binarization(array):
    # Takes a binary-class datafile and turns the max value (positive class)
    # into 1 and the min into 0
    array = np.array(array, dtype=float)  # conversion needed to use np.inf
    if len(np.unique(array)) > 2:
        raise ValueError('The argument must be a binary-class datafile. '
                         '{} classes detected'.format(len(np.unique(array))))

    # manipulation which aims at avoiding errors in data
    # with for example classes '1' and '2'.
    array[array == np.amax(array)] = np.inf
    array[array == np.amin(array)] = 0
    array[array == np.inf] = 1
    return np.array(array, dtype=int)


def multilabel_to_multiclass(array):
    array = binarization(array)
    return np.array([np.nonzero(array[i, :])[0][0] for i in range(len(array))])


def convert_to_num(Ybin):
    """
    Convert binary targets to a numeric vector
    (typically classification target values).
    :param Ybin: one-hot encoded targets, shape (n_samples, n_classes)
    :return: class indices, shape (n_samples,)
    """
    result = np.array(Ybin)
    if len(Ybin.shape) != 1:
        result = np.dot(Ybin, range(Ybin.shape[1]))
    return result


def convert_to_bin(Ycont, nval, verbose=True):
    # Convert numeric vector to binary (typically classification target values).
    # 'verbose' is kept for backwards compatibility but is currently unused.
    Ybin = [[0] * nval for x in range(len(Ycont))]
    for i in range(len(Ybin)):
        line = Ybin[i]
        line[int(Ycont[i])] = 1
        Ybin[i] = line
    return Ybin


def predict_RAM_usage(X, categorical):
    # Return estimated RAM usage of dataset after OneHotEncoding in bytes.
    estimated_columns = 0
    for i, cat in enumerate(categorical):
        if cat:
            unique_values = np.unique(X[:, i])
            num_unique_values = np.sum(np.isfinite(unique_values))
            estimated_columns += num_unique_values
        else:
            estimated_columns += 1
    estimated_ram = estimated_columns * X.shape[0] * X.dtype.itemsize
    return estimated_ram
{ "content_hash": "d03dec8722563d3a2e0b93dedbe08ad0", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 79, "avg_line_length": 30.02857142857143, "alnum_prop": 0.6289248334919124, "repo_name": "hmendozap/auto-sklearn", "id": "dd95a0be4015a441630cf74312328e3725014388", "size": "2213", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "autosklearn/util/data.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "6722" }, { "name": "Makefile", "bytes": "6791" }, { "name": "Python", "bytes": "1207634" }, { "name": "Shell", "bytes": "851" } ], "symlink_target": "" }
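# A small usage sketch for the helpers defined above (the arrays here are made
# up for illustration; assumes the module is importable as autosklearn.util.data):
import numpy as np
from autosklearn.util.data import convert_to_bin, convert_to_num, predict_RAM_usage

y = np.array([0, 2, 1, 2])
y_bin = np.array(convert_to_bin(y, nval=3, verbose=False))
# convert_to_num inverts the encoding: back to [0, 2, 1, 2]
y_back = convert_to_num(y_bin)

# Estimated size in bytes after one-hot encoding the single categorical column:
X = np.array([[0.5, 1.0], [0.1, 2.0], [0.7, 1.0]])
ram = predict_RAM_usage(X, categorical=[False, True])
print(y_back, ram)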
import os.path import json import platform import re import uuid import helix.depcheck import helix.logs import helix.proc import helix.saferequests from helix.cmdline import command_main from helix.io import fix_path, zip_directory, add_file_to_zip from helix.platformutil import is_windows from helix_test_execution import HelixTestExecution from helix.settings import settings_from_env from helix.servicebusrepository import ServiceBusRepository from helix.workitem import HelixWorkItem log = helix.logs.get_logger() def main(args=None): def _main(settings, optlist, args): """ Usage:: continuationrunner [--config config.json] [--setting name=value] --script [--args arg1 arg2...] """ optdict = dict(optlist) log.info("BuildTools Helix Continuation Runner starting") if '--args' in optdict: script_arguments = optdict['--args'] log.info("Script Arguments: " + script_arguments) if '--script' in optdict: script_to_execute = optdict['--script'] else: log.error("Value for parameter '--script' is required") return -1 if '--next_queue' in optdict: next_queue = optdict['--next_queue'] else: log.error("Need a secondary queue id to continue execution.") return -1 if '--next_payload_dir' in optdict: next_payload_dir = optdict['--next_payload_dir'] else: log.error("Need a secondary payload to continue execution.") return -1 unpack_dir = fix_path(settings.workitem_payload_dir) execution_args = [os.path.join(unpack_dir, script_to_execute)] + args return_code = helix.proc.run_and_log_output( execution_args, cwd=unpack_dir, env=None ) if return_code == 0: # currently there's no use for it, but here's where we'd choose to send out XUnit results # if desired at some point. log.info("First stage of execution succeded. Sending a new work item to " + next_queue) log.info("Will include contents of " + next_payload_dir) settings = settings_from_env() # load Client-specific settings config_path = os.path.join(settings.config_root, "ClientSettings.json") settings.__dict__.update(json.load(open(config_path))) service_bus_repository = ServiceBusRepository(settings.ServiceBusRoot, settings.QueueId, settings.LongPollTimeout, settings.SAS, settings.servicebus_retry_count, settings.servicebus_retry_delay ) # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine functionality # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario) call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation" if is_windows(): continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd" else: continuation_command = "$HELIX_PYTHONPATH% $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh" # Prep the follow-up work item ... new_work_item = HelixWorkItem( correlation_id=settings.correlation_id, work_item_friendly_name=settings.workitem_friendly_name + ".Execution", command=continuation_command, results_output_uri=settings.output_uri + "/continuation", results_output_write_token=settings.output_write_token, results_output_read_token=settings.output_read_token) # This may eventually cause trouble if zips with identical names are somehow included inside # other payload zips. Chained continuation will be OK as there will be a new results # directory to upload to for each leg. 
new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip" secondary_zip_path = os.path.join(settings.workitem_working_dir, new_workitem_payload_name) zip_directory(secondary_zip_path, next_payload_dir) log.info("Zipped into " + secondary_zip_path) # Upload the payloads for the job upload_client = helix.azure_storage.BlobUploadClient(settings.output_uri, settings.output_write_token, settings.output_read_token) new_payload_uri = upload_client.upload(secondary_zip_path, new_workitem_payload_name) new_work_item.WorkItemPayloadUris.append(new_payload_uri) # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts) # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from # storage will involve plumbing that through or re-uploading it (can be huge) supplemental_payload_path = os.path.join(settings.work_root, settings.correlation_id, "work", "SupplementalPayload.zip") supplemental_payload_uri = upload_client.upload(supplemental_payload_path, "SupplementalPayload.zip") log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri) log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri) new_work_item.CorrelationPayloadUris.append(supplemental_payload_uri) if service_bus_repository.post_new_workitem(queue_id=next_queue, work_item=new_work_item): log.info("Successfully queued new work item.") else: log.error("Failure to send to Service bus.") return -1 else: log.error("Got non-zero exit code for first stage of execution. Skipping further processing.") return return_code return command_main(_main, ['script=', 'args=', 'next_queue=', 'next_payload_dir='], args) if __name__ == '__main__': import sys sys.exit(main()) helix.depcheck.check_dependencies(__name__)
{ "content_hash": "b021a1c57809a765aac1a012572f7552", "timestamp": "", "source": "github", "line_count": 147, "max_line_length": 119, "avg_line_length": 46.564625850340136, "alnum_prop": 0.581300219138057, "repo_name": "joperezr/buildtools", "id": "ccd65182f0107217b8607b920c6a86aabb2280d0", "size": "7065", "binary": false, "copies": "13", "ref": "refs/heads/master", "path": "src/Microsoft.DotNet.Build.CloudTestTasks/RunnerScripts/scriptrunner/continuationrunner.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "12763" }, { "name": "C#", "bytes": "2808867" }, { "name": "C++", "bytes": "65225" }, { "name": "Groovy", "bytes": "957" }, { "name": "Perl", "bytes": "1497" }, { "name": "PowerShell", "bytes": "9799" }, { "name": "Python", "bytes": "15146" }, { "name": "Shell", "bytes": "22425" }, { "name": "Visual Basic", "bytes": "249" }, { "name": "XSLT", "bytes": "13094" } ], "symlink_target": "" }
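# The runner above gates the "queue the next work item" step on the exit code of
# the first-stage script. A stripped-down sketch of that control flow using only
# the standard library (paths and names are placeholders, not the real helix APIs):
import shutil
import subprocess

def run_stage_then_package(script, payload_dir, archive_base):
    """Run one stage; on success, zip its follow-up payload and return the path."""
    return_code = subprocess.call([script])
    if return_code != 0:
        # Mirrors the runner: skip all further processing on failure.
        return None
    # shutil.make_archive appends ".zip" to archive_base.
    return shutil.make_archive(archive_base, 'zip', payload_dir)

# archive = run_stage_then_package('./first_stage.sh', './next_payload', './continuation')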
from tests import clear_staging_env, staging_env, eq_, ne_, is_, staging_directory from tests import _no_sql_testing_config, env_file_fixture, script_file_fixture, _testing_config from alembic import command from alembic.script import ScriptDirectory from alembic.environment import EnvironmentContext from alembic import util import os import unittest import datetime env, abc, def_ = None, None, None class GeneralOrderedTests(unittest.TestCase): def test_001_environment(self): assert_set = set(['env.py', 'script.py.mako', 'README']) eq_( assert_set.intersection(os.listdir(env.dir)), assert_set ) def test_002_rev_ids(self): global abc, def_ abc = util.rev_id() def_ = util.rev_id() ne_(abc, def_) def test_003_heads(self): eq_(env.get_heads(), []) def test_004_rev(self): script = env.generate_revision(abc, "this is a message", refresh=True) eq_(script.doc, "this is a message") eq_(script.revision, abc) eq_(script.down_revision, None) assert os.access( os.path.join(env.dir, 'versions', '%s_this_is_a_message.py' % abc), os.F_OK) assert callable(script.module.upgrade) eq_(env.get_heads(), [abc]) def test_005_nextrev(self): script = env.generate_revision(def_, "this is the next rev", refresh=True) assert os.access( os.path.join(env.dir, 'versions', '%s_this_is_the_next_rev.py' % def_), os.F_OK) eq_(script.revision, def_) eq_(script.down_revision, abc) eq_(env._revision_map[abc].nextrev, set([def_])) assert script.module.down_revision == abc assert callable(script.module.upgrade) assert callable(script.module.downgrade) eq_(env.get_heads(), [def_]) def test_006_from_clean_env(self): # test the environment so far with a # new ScriptDirectory instance. env = staging_env(create=False) abc_rev = env._revision_map[abc] def_rev = env._revision_map[def_] eq_(abc_rev.nextrev, set([def_])) eq_(abc_rev.revision, abc) eq_(def_rev.down_revision, abc) eq_(env.get_heads(), [def_]) def test_007_no_refresh(self): rid = util.rev_id() script = env.generate_revision(rid, "dont' refresh") is_(script, None) env2 = staging_env(create=False) eq_(env2._as_rev_number("head"), rid) def test_008_long_name(self): rid = util.rev_id() env.generate_revision(rid, "this is a really long name with " "lots of characters and also " "I'd like it to\nhave\nnewlines") assert os.access( os.path.join(env.dir, 'versions', '%s_this_is_a_really_lon.py' % rid), os.F_OK) @classmethod def setup_class(cls): global env env = staging_env() @classmethod def teardown_class(cls): clear_staging_env() class ScriptNamingTest(unittest.TestCase): @classmethod def setup_class(cls): _testing_config() @classmethod def teardown_class(cls): clear_staging_env() def test_args(self): script = ScriptDirectory( staging_directory, file_template="%(rev)s_%(slug)s_" "%(year)s_%(month)s_" "%(day)s_%(hour)s_" "%(minute)s_%(second)s" ) create_date = datetime.datetime(2012, 7, 25, 15, 8, 5) eq_( script._rev_path("12345", "this is a message", create_date), "%s/versions/12345_this_is_a_" "message_2012_7_25_15_8_5.py" % staging_directory ) class TemplateArgsTest(unittest.TestCase): def setUp(self): staging_env() self.cfg = _no_sql_testing_config( directives="\nrevision_environment=true\n" ) def tearDown(self): clear_staging_env() def test_args_propagate(self): config = _no_sql_testing_config() script = ScriptDirectory.from_config(config) template_args = {"x": "x1", "y": "y1", "z": "z1"} env = EnvironmentContext( config, script, template_args=template_args ) env.configure(dialect_name="sqlite", template_args={"y": "y2", "q": "q1"}) eq_( template_args, {"x": "x1", "y": "y2", 
"z": "z1", "q": "q1"} ) def test_tmpl_args_revision(self): env_file_fixture(""" context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"}) """) script_file_fixture(""" # somearg: ${somearg} revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} """) command.revision(self.cfg, message="some rev") script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision('head') text = open(rev.path).read() assert "somearg: somevalue" in text
{ "content_hash": "84b026a84ac7b82d29074ed2803f48f7", "timestamp": "", "source": "github", "line_count": 156, "max_line_length": 96, "avg_line_length": 32.73717948717949, "alnum_prop": 0.5641276679067946, "repo_name": "shadowmint/py-test-watcher", "id": "fd9a7ed74aeb4eb06e52a7491a94df1657479031", "size": "5107", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/alembic-0.5.0/tests/test_revision_create.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "109878" }, { "name": "Python", "bytes": "488664" }, { "name": "Shell", "bytes": "544" } ], "symlink_target": "" }
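# The ScriptNamingTest above exercises alembic's file_template tokens. The same
# substitution can be reproduced with plain %-formatting to see why the expected
# filename looks the way it does (values copied from the test):
template = ("%(rev)s_%(slug)s_%(year)s_%(month)s_%(day)s_"
            "%(hour)s_%(minute)s_%(second)s")
print(template % {
    'rev': '12345', 'slug': 'this_is_a_message',
    'year': 2012, 'month': 7, 'day': 25, 'hour': 15, 'minute': 8, 'second': 5,
})
# -> 12345_this_is_a_message_2012_7_25_15_8_5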
""" Created on Wed Jan 6 12:01:11 2021 @author: akeeste I'm creating this script to serve as a parsed down version of the CASE.py + call_capy.py scripts. Hopefully this better highlights the necessities for running Capytaine without the wrapper, which is convenient but harder to read through. Also see: Capytaine/docs/user_manual/examples directory User Manual cookbook in documentation """ import capytaine as cpt import numpy as np import xarray as xr # 1. Create a Capytaine 'FloatingBody' and assign appropriate properties and dofs # A. Create the body from a mesh or a pre-packaged geometry # The mesh is the same format as Nemoh: mesh origin at the SWL, cg relative # to the mesh origin. # NOTE: input meshes should not have a lid body = cpt.FloatingBody.from_file('Sphere/sphere.dat') # many file types supported # body = cpt.Sphere(radius=5.0,center=(0,0,-2),nphi=50,ntheta=50) # B. Define the center of gravity, this is necessary for the rotational dofs # to be calculated about the center of mass and not the mesh origin body.center_of_mass = (0,0,-2) # C. Cut the mesh at the SWL. This may or may not be redundant. It is necessary # for the prepackaged Capytaine geometries body.keep_immersed_part() # D. Add degrees of freedom body.add_all_rigid_body_dofs() # E. Define simulation parameters freq = np.linspace(0.02, 8.4, 3) directions = np.linspace(0,90,2) # 2. Define a list of problems to be solved. Can be radiation problems or # diffraction problems. ( # Uses python 'list comprehension' to easily loop through all dofs / freq problems = [cpt.RadiationProblem(body=body, radiating_dof=dof, omega=w, sea_bottom=-np.infty, g=9.81, rho=1000.0) for dof in body.dofs for w in freq] problems += [cpt.DiffractionProblem(body=body, omega=w, wave_direction=heading, sea_bottom=-np.infty, g=9.81, rho=1000.0) for w in freq for heading in directions] # 3. Define the BEM solver to be used. solver = cpt.BEMSolver() # 4. Loop through the problems and solve each results = [solver.solve(problem, keep_details=True) for problem in sorted(problems)] # 5. Create a dataset using the list of results and any hydrostatics output capyData = cpt.assemble_dataset(results, hydrostatics=False) ############################################################################### # ** Alternate method to 2-5 # Create a dataset of parameters. # 'fill_dataset()' automatically creates problems and solves them. # This method is easy and clean, but has less control on the problem details test_matrix = xr.Dataset(coords={ 'omega': freq, 'wave_direction': directions, 'radiating_dof': list(body.dofs), 'water_depth': [np.infty], }) dataset = solver.fill_dataset(test_matrix, [body], hydrostatics=False) ############################################################################### # 6. Separate complex values and save to .nc file ncFName = 'demo.nc' cpt.io.xarray.separate_complex_values(capyData).to_netcdf(ncFName) # 7. Use Read_CAPYTAINE() function in WEC-Sim BEMIO
{ "content_hash": "c8310f7303a4da58e7eb11e38dc045e7", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 84, "avg_line_length": 36.666666666666664, "alnum_prop": 0.6217008797653959, "repo_name": "WEC-Sim/WEC-Sim", "id": "7e9db176584c53d442acbb5a9b92c723d05489ef", "size": "3434", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/BEMIO/CAPYTAINE/demo.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "MATLAB", "bytes": "17524119" } ], "symlink_target": "" }
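# A quick way to sanity-check the exported results from the demo above (assumes
# xarray is installed; demo.nc stores complex coefficients as separate
# real/imaginary variables, as written by separate_complex_values):
import xarray as xr

ds = xr.open_dataset('demo.nc')
print(ds)            # dimensions such as omega, radiating_dof, wave_direction
print(ds.data_vars)  # the hydrodynamic coefficients written by Capytaine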
from flask import jsonify

from . import api


@api.errorhandler(403)
def forbidden(e):
    # Return a JSON body instead of the default HTML error page.
    response = jsonify({'error': 'forbidden'})
    response.status_code = 403
    return response
{ "content_hash": "8122c26fffc4c6efb81ba19bc559a1fd", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 47, "avg_line_length": 21, "alnum_prop": 0.6825396825396826, "repo_name": "lanig35/flask-app", "id": "ea0bdfba89189e63f376401339a2ace094d77188", "size": "231", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/api/errors.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
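# The blueprint above returns JSON for 403 errors. The same pattern extends to
# other status codes; a sketch of a JSON 404 handler on the same blueprint
# (hypothetical, not part of the original app):
from flask import jsonify

from . import api


@api.errorhandler(404)
def page_not_found(e):
    response = jsonify({'error': 'not found'})
    response.status_code = 404
    return response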
import config import gettext import os import re import shlex import subprocess import sys from Cheetah.Template import Template from ovirt_engine import configfile, java, service def _(m): return gettext.dgettext(message=m, domain='ovirt-engine') class Daemon(service.Daemon): _JBOSS_VERSION_REGEX = re.compile( flags=re.VERBOSE, pattern=r""" ^ [^\d]* (?P<major>\d+) \. (?P<minor>\d+) \. (?P<revision>\d+) .* """, ) def __init__(self): super(Daemon, self).__init__() self._tempDir = None self._jbossRuntime = None self._jbossVersion = None self._jbossConfigFile = None self._defaults = os.path.abspath( os.path.join( os.path.dirname(sys.argv[0]), 'ovirt-engine.conf', ) ) def _processTemplate(self, template, dir, mode=None): out = os.path.join( dir, re.sub('\.in$', '', os.path.basename(template)), ) with open(out, 'w') as f: if mode is not None: os.chmod(out, mode) f.write( '%s' % ( Template( file=template, searchList=[ self._config, self._jbossVersion, { 'jboss_runtime': self._jbossRuntime.directory, }, ], ) ), ) return out def _linkModules(self, directory, modulePath): """ Link all the JBoss modules into a temporary directory. This required because jboss tries to automatically update indexes based on timestamp even if there is no permission to do so. """ modifiedModulePath = [] for index, element in enumerate(modulePath.split(':')): modulesTmpDir = os.path.join( directory, '%02d-%s' % ( index, '-'.join(element.split(os.sep)[-2:]), ), ) modifiedModulePath.append(modulesTmpDir) # For each directory in the modules directory create the # same in the temporary directory and populate with symlinks # pointing to the original files (excluding indexes): for parentDir, childrenDirs, childrenFiles in os.walk(element): parentTmpDir = os.path.join( modulesTmpDir, os.path.relpath( parentDir, element ), ) if not os.path.exists(parentTmpDir): os.makedirs(parentTmpDir) for childFile in childrenFiles: if childFile.endswith('.index'): continue os.symlink( os.path.join(parentDir, childFile), os.path.join(parentTmpDir, childFile) ) return ':'.join(modifiedModulePath) def _checkInstallation( self, pidfile, jbossModulesJar, ): # Check the required JBoss directories and files: self.check( name=self._config.get('JBOSS_HOME'), directory=True, ) self.check( name=jbossModulesJar, ) # Check the required engine directories and files: self.check( os.path.join( self._config.get('ENGINE_USR'), 'services', ), directory=True, ) self.check( self._config.get('ENGINE_CACHE'), directory=True, writable=True, ) self.check( self._config.get('ENGINE_TMP'), directory=True, writable=True, mustExist=False, ) self.check( self._config.get('ENGINE_LOG'), directory=True, writable=True, ) self.check( name=os.path.join( self._config.get("ENGINE_LOG"), 'host-deploy', ), directory=True, writable=True, ) for log in ('engine.log', 'console.log', 'server.log'): self.check( name=os.path.join(self._config.get("ENGINE_LOG"), log), mustExist=False, writable=True, ) if pidfile is not None: self.check( name=pidfile, writable=True, mustExist=False, ) def _setupEngineApps(self): deploymentsDir = os.path.join( self._jbossRuntime.directory, 'deployments', ) os.mkdir(deploymentsDir) # The list of applications to be deployed: for engineAppDir in shlex.split(self._config.get('ENGINE_APPS')): self.logger.debug('Deploying: %s', engineAppDir) if not os.path.isabs(engineAppDir): engineAppDir = os.path.join( self._config.get('ENGINE_USR'), engineAppDir, ) if not os.path.exists(engineAppDir): self.logger.warning( _( "Application directory '{directory}' " "does not 
exist, it will be ignored" ).format( directory=engineAppDir, ), ) continue engineAppLink = os.path.join( deploymentsDir, os.path.basename(engineAppDir), ) os.symlink(engineAppDir, engineAppLink) with open('%s.dodeploy' % engineAppLink, 'w'): pass def _detectJBossVersion(self): proc = subprocess.Popen( executable=self._executable, args=['ovirt-engine-version'] + self._engineArgs + ['-v'], env=self._engineEnv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, ) stdout, stderr = proc.communicate() stdout = stdout.decode('utf-8', 'replace').splitlines() stderr = stderr.decode('utf-8', 'replace').splitlines() self.logger.debug( "Return code: %s, \nstdout: '%s, \nstderr: '%s'", proc.returncode, stdout, stderr, ) for line in stdout: match = self._JBOSS_VERSION_REGEX.match(line) if match is not None: self._jbossVersion = { 'JBOSS_MAJOR': int(match.group('major')), 'JBOSS_MINOR': int(match.group('minor')), 'JBOSS_REVISION': int(match.group('revision')), } break else: raise RuntimeError(_('Cannot detect JBoss version')) self.logger.debug( "Detected JBoss version: %s", self._jbossVersion, ) def daemonSetup(self): if os.geteuid() == 0: raise RuntimeError( _('This service cannot be executed as root') ) if not os.path.exists(self._defaults): raise RuntimeError( _( "The configuration defaults file '{file}' " "required but missing" ).format( file=self._defaults, ) ) self._config = configfile.ConfigFile( ( self._defaults, config.ENGINE_VARS, ), ) # # the earliest so we can abort early. # self._executable = os.path.join( java.Java().getJavaHome(), 'bin', 'java', ) jbossModulesJar = os.path.join( self._config.get('JBOSS_HOME'), 'jboss-modules.jar', ) self._checkInstallation( pidfile=self.pidfile, jbossModulesJar=jbossModulesJar, ) self._tempDir = service.TempDir(self._config.get('ENGINE_TMP')) self._tempDir.create() self._jbossRuntime = service.TempDir(self._config.get('JBOSS_RUNTIME')) self._jbossRuntime.create() self._setupEngineApps() jbossTempDir = os.path.join( self._jbossRuntime.directory, 'tmp', ) jbossConfigDir = os.path.join( self._jbossRuntime.directory, 'config', ) javaModulePath = self._linkModules( os.path.join( self._jbossRuntime.directory, 'modules', ), '%s:%s' % ( self._config.get('ENGINE_JAVA_MODULEPATH'), os.path.join( self._config.get('JBOSS_HOME'), 'modules', ), ), ) os.mkdir(jbossTempDir) os.mkdir(jbossConfigDir) os.chmod(jbossConfigDir, 0o700) jbossBootLoggingFile = self._processTemplate( template=os.path.join( os.path.dirname(sys.argv[0]), 'ovirt-engine-logging.properties.in' ), dir=jbossConfigDir, ) # We start with an empty list of arguments: self._engineArgs = [] # Add arguments for the java virtual machine: self._engineArgs.extend([ # Virtual machine options: '-server', '-XX:+TieredCompilation', '-Xms%s' % self._config.get('ENGINE_HEAP_MIN'), '-Xmx%s' % self._config.get('ENGINE_HEAP_MAX'), '-XX:PermSize=%s' % self._config.get('ENGINE_PERM_MIN'), '-XX:MaxPermSize=%s' % self._config.get( 'ENGINE_PERM_MAX' ), ]) # Add extra system properties provided in the configuration: for engineProperty in shlex.split( self._config.get('ENGINE_PROPERTIES') ): if not engineProperty.startswith('-D'): engineProperty = '-D' + engineProperty self._engineArgs.append(engineProperty) # Add extra jvm arguments provided in the configuration: for arg in shlex.split(self._config.get('ENGINE_JVM_ARGS')): self._engineArgs.append(arg) # Enable verbose garbage collection if required: if self._config.getboolean('ENGINE_VERBOSE_GC'): self._engineArgs.extend([ '-verbose:gc', '-XX:+PrintGCTimeStamps', 
'-XX:+PrintGCDetails', ]) # Specify special krb5.conf file if required if self._config.get('AAA_KRB5_CONF_FILE'): self._engineArgs.append( '-Djava.security.krb5.conf=%s' % self._config.get( 'AAA_KRB5_CONF_FILE' ) ) # Add arguments for JBoss: self._engineArgs.extend([ '-Djava.util.logging.manager=org.jboss.logmanager', '-Dlogging.configuration=file://%s' % jbossBootLoggingFile, '-Dorg.jboss.resolver.warning=true', '-Djboss.modules.system.pkgs=org.jboss.byteman', '-Djboss.modules.write-indexes=false', '-Djboss.server.default.config=ovirt-engine', '-Djboss.home.dir=%s' % self._config.get( 'JBOSS_HOME' ), '-Djboss.server.base.dir=%s' % self._config.get( 'ENGINE_USR' ), '-Djboss.server.data.dir=%s' % self._config.get( 'ENGINE_VAR' ), '-Djboss.server.log.dir=%s' % self._config.get( 'ENGINE_LOG' ), '-Djboss.server.config.dir=%s' % jbossConfigDir, '-Djboss.server.temp.dir=%s' % jbossTempDir, '-Djboss.controller.temp.dir=%s' % jbossTempDir, '-jar', jbossModulesJar, '-mp', javaModulePath, '-jaxpmodule', 'javax.xml.jaxp-provider', 'org.jboss.as.standalone', ]) self._engineEnv = os.environ.copy() self._engineEnv.update({ 'PATH': ( '/usr/local/sbin:/usr/local/bin:' '/usr/sbin:/usr/bin:/sbin:/bin' ), 'LANG': 'en_US.UTF-8', 'LC_ALL': 'en_US.UTF-8', 'ENGINE_DEFAULTS': self._defaults, 'ENGINE_VARS': config.ENGINE_VARS, 'ENGINE_ETC': self._config.get('ENGINE_ETC'), 'ENGINE_LOG': self._config.get('ENGINE_LOG'), 'ENGINE_TMP': self._tempDir.directory, 'ENGINE_USR': self._config.get('ENGINE_USR'), 'ENGINE_VAR': self._config.get('ENGINE_VAR'), 'ENGINE_CACHE': self._config.get('ENGINE_CACHE'), }) self._detectJBossVersion() self._jbossConfigFile = self._processTemplate( template=os.path.join( os.path.dirname(sys.argv[0]), 'ovirt-engine.xml.in', ), dir=jbossConfigDir, mode=0o600, ) def daemonStdHandles(self): consoleLog = open( os.path.join( self._config.get('ENGINE_LOG'), 'console.log' ), 'w+', ) return (consoleLog, consoleLog) def daemonContext(self): try: # # create mark file to be used by notifier service # with open(self._config.get('ENGINE_UP_MARK'), 'w') as f: f.write('%s\n' % os.getpid()) # # NOTE: # jdwp must be set only for the process we are trying # to debug, as jvm will open it and conflict with other # instances. # self.daemonAsExternalProcess( executable=self._executable, args=( ['ovirt-engine'] + ([( '-Xrunjdwp:transport=dt_socket,address=%s,' 'server=y,suspend=n' ) % ( self._config.get('ENGINE_DEBUG_ADDRESS') )] if self._config.get('ENGINE_DEBUG_ADDRESS') else []) + self._engineArgs + ['-c', os.path.basename(self._jbossConfigFile)] ), env=self._engineEnv, stopTime=self._config.getinteger( 'ENGINE_STOP_TIME' ), stopInterval=self._config.getinteger( 'ENGINE_STOP_INTERVAL' ), ) raise self.TerminateException() except self.TerminateException: if os.path.exists(self._config.get('ENGINE_UP_MARK')): os.remove(self._config.get('ENGINE_UP_MARK')) def daemonCleanup(self): if self._tempDir: self._tempDir.destroy() if self._jbossRuntime: self._jbossRuntime.destroy() if __name__ == '__main__': service.setupLogger() d = Daemon() d.run() # vim: expandtab tabstop=4 shiftwidth=4
{ "content_hash": "751c292b07e80c3b7109de3cef708729", "timestamp": "", "source": "github", "line_count": 497, "max_line_length": 79, "avg_line_length": 31.348088531187123, "alnum_prop": 0.47586649550706034, "repo_name": "yingyun001/ovirt-engine", "id": "10b82c18e3ec7088e0c7d05d2fdee946c8a07f5b", "size": "16183", "binary": false, "copies": "5", "ref": "refs/heads/eayunos-4.2", "path": "packaging/services/ovirt-engine/ovirt-engine.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "62308" }, { "name": "Groff", "bytes": "10764" }, { "name": "HTML", "bytes": "16218" }, { "name": "Java", "bytes": "35041045" }, { "name": "JavaScript", "bytes": "70092" }, { "name": "Makefile", "bytes": "24720" }, { "name": "PLpgSQL", "bytes": "795871" }, { "name": "Python", "bytes": "970779" }, { "name": "Shell", "bytes": "163500" }, { "name": "XSLT", "bytes": "54683" } ], "symlink_target": "" }
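# The _JBOSS_VERSION_REGEX above pulls major/minor/revision out of the first
# matching line of the version probe's output. A standalone illustration of the
# same pattern, written compactly and run against a made-up version string:
import re

pattern = re.compile(r'^[^\d]*(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+).*')
match = pattern.match('JBoss AS 7.1.1.Final')
if match is not None:
    version = {k: int(v) for k, v in match.groupdict().items()}
    print(version)  # {'major': 7, 'minor': 1, 'revision': 1}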
from glance.api import CONF import glance.db.registry.api from glance.db.sqlalchemy import api from glance.db.sqlalchemy import models as db_models import glance.tests.functional.db as db_tests from glance.tests import functional from glance.tests.functional.db import base def get_db(config): config(sql_connection='sqlite://', verbose=False, debug=False) CONF.set_override('data_api', 'glance.db.registry.api') db_api = glance.db.get_api() db_api.setup_db_env() return db_api def reset_db(db_api): pass class FunctionalInitWrapper(functional.FunctionalTest): def setUp(self): # NOTE(flaper87): We need to start the # registry service *before* TestDriver's # setup goes on, since it'll create some # images that will be later used in tests. # # Python's request is way too magical and # it will make the TestDriver's super call # FunctionalTest's without letting us start # the server. # # This setUp will be called by TestDriver # and will be used to call FunctionalTest # setUp method *and* start the registry # service right after it. super(FunctionalInitWrapper, self).setUp() self.registry_server.deployment_flavor = 'fakeauth' self.start_with_retry(self.registry_server, 'registry_port', 3, api_version=2) self.config(registry_port=self.registry_server.bind_port, use_user_token=True) class TestRegistryDriver(base.TestDriver, base.DriverTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestRegistryDriver, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestRegistryDriver, self).tearDown() class TestRegistryQuota(base.DriverQuotaTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestRegistryQuota, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestRegistryQuota, self).tearDown()
{ "content_hash": "11d7644d98f43a767bcbc24fb7db03e3", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 70, "avg_line_length": 31.708333333333332, "alnum_prop": 0.6434515987735436, "repo_name": "SUSE-Cloud/glance", "id": "8ada2f0c0a2795a31851c329f218d4de75dbae60", "size": "2912", "binary": false, "copies": "5", "ref": "refs/heads/stable/havana", "path": "glance/tests/functional/db/test_registry.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2557049" }, { "name": "Shell", "bytes": "3488" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Clinic_history', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('place', models.CharField(max_length=250)), ('date', models.DateField()), ('notes', models.CharField(max_length=250)), ], ), migrations.CreateModel( name='Dentist', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=250)), ('second_name', models.CharField(blank=True, max_length=250, null=True)), ('last_name', models.CharField(blank=True, max_length=250, null=True)), ('circle', models.IntegerField()), ('register_number', models.IntegerField()), ], ), migrations.CreateModel( name='Patient', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=250)), ('second_name', models.CharField(blank=True, max_length=250, null=True)), ('last_name', models.CharField(blank=True, max_length=250, null=True)), ('subsidiary_number', models.PositiveIntegerField(unique=True)), ('clinic_history', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='person.Clinic_history')), ('dentist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='person.Dentist')), ], ), ]
{ "content_hash": "65b9be7206ea13cdb162ce33b696c7c0", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 153, "avg_line_length": 42.95744680851064, "alnum_prop": 0.5720653789004457, "repo_name": "nanomolina/JP", "id": "314f1ec2b1bf70258ccf3fe70a354b0be89aca26", "size": "2091", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/odontology/person/migrations/0001_initial.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "189771" }, { "name": "HTML", "bytes": "222882" }, { "name": "JavaScript", "bytes": "42164" }, { "name": "Python", "bytes": "191397" } ], "symlink_target": "" }
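# The auto-generated migration above corresponds to models along these lines
# (reconstructed from the field list for illustration; the original models.py
# may differ in ordering, Meta options, or __str__ methods):
from django.db import models


class Clinic_history(models.Model):
    place = models.CharField(max_length=250)
    date = models.DateField()
    notes = models.CharField(max_length=250)


class Dentist(models.Model):
    first_name = models.CharField(max_length=250)
    second_name = models.CharField(max_length=250, blank=True, null=True)
    last_name = models.CharField(max_length=250, blank=True, null=True)
    circle = models.IntegerField()
    register_number = models.IntegerField()


class Patient(models.Model):
    first_name = models.CharField(max_length=250)
    second_name = models.CharField(max_length=250, blank=True, null=True)
    last_name = models.CharField(max_length=250, blank=True, null=True)
    subsidiary_number = models.PositiveIntegerField(unique=True)
    clinic_history = models.OneToOneField(Clinic_history, blank=True, null=True,
                                          on_delete=models.CASCADE)
    dentist = models.ForeignKey(Dentist, on_delete=models.CASCADE)

# Running makemigrations for the "person" app against models like these would
# regenerate a migration equivalent to 0001_initial above.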
"""A program to train a tensorflow neural net parser from a conll file.""" import base64 import os import os.path import random import time import tensorflow as tf from tensorflow.python.framework import errors from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from google.protobuf import text_format from syntaxnet.ops import gen_parser_ops from syntaxnet import task_spec_pb2 from syntaxnet import sentence_pb2 from dragnn.protos import spec_pb2 from dragnn.python.sentence_io import ConllSentenceReader from dragnn.python import evaluation from dragnn.python import graph_builder from dragnn.python import lexicon from dragnn.python import spec_builder from dragnn.python import trainer_lib from syntaxnet.util import check flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('tf_master', '', 'TensorFlow execution engine to connect to.') flags.DEFINE_string('dragnn_spec', '', 'Path to the spec defining the model.') flags.DEFINE_string('resource_path', '', 'Path to constructed resources.') flags.DEFINE_string('hyperparams', 'adam_beta1:0.9 adam_beta2:0.9 adam_eps:0.00001 ' 'decay_steps:128000 dropout_rate:0.8 gradient_clip_norm:1 ' 'learning_method:"adam" learning_rate:0.0005 seed:1 ' 'use_moving_average:true', 'Hyperparameters of the model to train, either in ProtoBuf' 'text format or base64-encoded ProtoBuf text format.') flags.DEFINE_string('tensorboard_dir', '', 'Directory for TensorBoard logs output.') flags.DEFINE_string('checkpoint_filename', '', 'Filename to save the best checkpoint to.') flags.DEFINE_string('training_corpus_path', '', 'Path to training data.') flags.DEFINE_string('tune_corpus_path', '', 'Path to tuning set data.') flags.DEFINE_bool('compute_lexicon', False, '') flags.DEFINE_bool('projectivize_training_set', True, '') flags.DEFINE_integer('batch_size', 4, 'Batch size.') flags.DEFINE_integer('report_every', 200, 'Report cost and training accuracy every this many steps.') flags.DEFINE_integer('job_id', 0, 'The trainer will clear checkpoints if the ' 'saved job id is less than the id this flag. If you want ' 'training to start over, increment this id.') def main(unused_argv): logging.set_verbosity(logging.INFO) check.IsTrue(FLAGS.checkpoint_filename) check.IsTrue(FLAGS.tensorboard_dir) check.IsTrue(FLAGS.resource_path) if not gfile.IsDirectory(FLAGS.resource_path): gfile.MakeDirs(FLAGS.resource_path) training_corpus_path = gfile.Glob(FLAGS.training_corpus_path)[0] tune_corpus_path = gfile.Glob(FLAGS.tune_corpus_path)[0] # SummaryWriter for TensorBoard tf.logging.info('TensorBoard directory: "%s"', FLAGS.tensorboard_dir) tf.logging.info('Deleting prior data if exists...') stats_file = '%s.stats' % FLAGS.checkpoint_filename try: stats = gfile.GFile(stats_file, 'r').readlines()[0].split(',') stats = [int(x) for x in stats] except errors.OpError: stats = [-1, 0, 0] tf.logging.info('Read ckpt stats: %s', str(stats)) do_restore = True if stats[0] < FLAGS.job_id: do_restore = False tf.logging.info('Deleting last job: %d', stats[0]) try: gfile.DeleteRecursively(FLAGS.tensorboard_dir) gfile.Remove(FLAGS.checkpoint_filename) except errors.OpError as err: tf.logging.error('Unable to delete prior files: %s', err) stats = [FLAGS.job_id, 0, 0] tf.logging.info('Creating the directory again...') gfile.MakeDirs(FLAGS.tensorboard_dir) tf.logging.info('Created! 
Instatiating SummaryWriter...') summary_writer = trainer_lib.get_summary_writer(FLAGS.tensorboard_dir) tf.logging.info('Creating TensorFlow checkpoint dir...') gfile.MakeDirs(os.path.dirname(FLAGS.checkpoint_filename)) # Constructs lexical resources for SyntaxNet in the given resource path, from # the training data. if FLAGS.compute_lexicon: logging.info('Computing lexicon...') lexicon.build_lexicon( FLAGS.resource_path, training_corpus_path, morph_to_pos=True) tf.logging.info('Loading MasterSpec...') master_spec = spec_pb2.MasterSpec() with gfile.FastGFile(FLAGS.dragnn_spec, 'r') as fin: text_format.Parse(fin.read(), master_spec) spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_path) logging.info('Constructed master spec: %s', str(master_spec)) hyperparam_config = spec_pb2.GridPoint() # Build the TensorFlow graph. tf.logging.info('Building Graph...') hyperparam_config = spec_pb2.GridPoint() try: text_format.Parse(FLAGS.hyperparams, hyperparam_config) except text_format.ParseError: text_format.Parse(base64.b64decode(FLAGS.hyperparams), hyperparam_config) g = tf.Graph() with g.as_default(): builder = graph_builder.MasterBuilder(master_spec, hyperparam_config) component_targets = [ spec_pb2.TrainTarget( name=component.name, max_index=idx + 1, unroll_using_oracle=[False] * idx + [True]) for idx, component in enumerate(master_spec.component) if 'shift-only' not in component.transition_system.registered_name ] trainers = [ builder.add_training_from_config(target) for target in component_targets ] annotator = builder.add_annotation() builder.add_saver() # Read in serialized protos from training data. training_set = ConllSentenceReader( training_corpus_path, projectivize=FLAGS.projectivize_training_set, morph_to_pos=True).corpus() tune_set = ConllSentenceReader( tune_corpus_path, projectivize=False, morph_to_pos=True).corpus() # Ready to train! logging.info('Training on %d sentences.', len(training_set)) logging.info('Tuning on %d sentences.', len(tune_set)) pretrain_steps = [10000, 0] tagger_steps = 100000 train_steps = [tagger_steps, 8 * tagger_steps] with tf.Session(FLAGS.tf_master, graph=g) as sess: # Make sure to re-initialize all underlying state. sess.run(tf.global_variables_initializer()) if do_restore: tf.logging.info('Restoring from checkpoint...') builder.saver.restore(sess, FLAGS.checkpoint_filename) prev_tagger_steps = stats[1] prev_parser_steps = stats[2] tf.logging.info('adjusting schedule from steps: %d, %d', prev_tagger_steps, prev_parser_steps) pretrain_steps[0] = max(pretrain_steps[0] - prev_tagger_steps, 0) tf.logging.info('new pretrain steps: %d', pretrain_steps[0]) trainer_lib.run_training( sess, trainers, annotator, evaluation.parser_summaries, pretrain_steps, train_steps, training_set, tune_set, tune_set, FLAGS.batch_size, summary_writer, FLAGS.report_every, builder.saver, FLAGS.checkpoint_filename, stats) if __name__ == '__main__': tf.app.run()
{ "content_hash": "5af49164bf266d79a6882b8f138296bf", "timestamp": "", "source": "github", "line_count": 187, "max_line_length": 80, "avg_line_length": 37.45989304812834, "alnum_prop": 0.6922198429693076, "repo_name": "jiaphuan/models", "id": "3952d62e891d56df6e4859df53c79d6615211802", "size": "7682", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "research/syntaxnet/dragnn/tools/trainer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1353" }, { "name": "C++", "bytes": "1224262" }, { "name": "GLSL", "bytes": "976" }, { "name": "HTML", "bytes": "147010" }, { "name": "JavaScript", "bytes": "33208" }, { "name": "Jupyter Notebook", "bytes": "71060" }, { "name": "Makefile", "bytes": "4763" }, { "name": "Protocol Buffer", "bytes": "72897" }, { "name": "Python", "bytes": "5957505" }, { "name": "Shell", "bytes": "76858" } ], "symlink_target": "" }
from __future__ import unicode_literals from django import forms from django.utils.translation import ugettext_lazy as _ # While this couples the geographic forms to the GEOS library, # it decouples from database (by not importing SpatialBackend). from django.contrib.gis.geos import GEOSException, GEOSGeometry, fromstr from .widgets import OpenLayersWidget class GeometryField(forms.Field): """ This is the basic form field for a Geometry. Any textual input that is accepted by GEOSGeometry is accepted by this form. By default, this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON. """ widget = OpenLayersWidget geom_type = 'GEOMETRY' default_error_messages = { 'required' : _('No geometry value provided.'), 'invalid_geom' : _('Invalid geometry value.'), 'invalid_geom_type' : _('Invalid geometry type.'), 'transform_error' : _('An error occurred when transforming the geometry ' 'to the SRID of the geometry form field.'), } def __init__(self, **kwargs): # Pop out attributes from the database field, or use sensible # defaults (e.g., allow None). self.srid = kwargs.pop('srid', None) self.geom_type = kwargs.pop('geom_type', self.geom_type) super(GeometryField, self).__init__(**kwargs) self.widget.attrs['geom_type'] = self.geom_type def to_python(self, value): """ Transforms the value to a Geometry object. """ if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): try: value = GEOSGeometry(value) if not value.srid: value.srid = self.widget.map_srid except (GEOSException, ValueError, TypeError): raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') return value def clean(self, value): """ Validates that the input value can be converted to a Geometry object (which is returned). A ValidationError is raised if the value cannot be instantiated as a Geometry. """ geom = super(GeometryField, self).clean(value) if geom is None: return geom # Ensuring that the geometry is of the correct type (indicated # using the OGC string label). if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY': raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type') # Transforming the geometry if the SRID was set. if self.srid: if not geom.srid: # Should match that of the field if not given. geom.srid = self.srid elif self.srid != -1 and self.srid != geom.srid: try: geom.transform(self.srid) except: raise forms.ValidationError(self.error_messages['transform_error'], code='transform_error') return geom def _has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ try: data = self.to_python(data) initial = self.to_python(initial) except forms.ValidationError: return True # Only do a geographic comparison if both values are available if initial and data: data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. 
return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data) class GeometryCollectionField(GeometryField): geom_type = 'GEOMETRYCOLLECTION' class PointField(GeometryField): geom_type = 'POINT' class MultiPointField(GeometryField): geom_type = 'MULTIPOINT' class LineStringField(GeometryField): geom_type = 'LINESTRING' class MultiLineStringField(GeometryField): geom_type = 'MULTILINESTRING' class PolygonField(GeometryField): geom_type = 'POLYGON' class MultiPolygonField(GeometryField): geom_type = 'MULTIPOLYGON'
{ "content_hash": "734564d53af62e1072fe62fd98974356", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 111, "avg_line_length": 34.99212598425197, "alnum_prop": 0.6266876687668766, "repo_name": "ZhaoCJ/django", "id": "55b887a445232bb8672dee16aa70ed8be173ca36", "size": "4444", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "django/contrib/gis/forms/fields.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
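# The _has_changed comparison above rests on GEOS geometries and equals_exact
# with a very small tolerance. A standalone illustration of that check
# (requires Django with the GEOS C library installed; coordinates are arbitrary):
from django.contrib.gis.geos import GEOSGeometry

initial = GEOSGeometry('SRID=4326;POINT(5 23)')
data = GEOSGeometry('SRID=4326;POINT(5.00001 23)')
print(initial.equals_exact(data, tolerance=0.000001))  # False -> treated as changed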
from runner.koan import * # # Package hierarchy of Python Koans project: # # contemplate_koans.py # koans/ # __init__.py # about_asserts.py # about_attribute_access.py # about_class_attributes.py # about_classes.py # ... # a_package_folder/ # __init__.py # a_module.py class AboutPackages(Koan): def test_subfolders_can_form_part_of_a_module_package(self): # Import ./a_package_folder/a_module.py from .a_package_folder.a_module import Duck duck = Duck() self.assertEqual("Donald", duck.name) def test_subfolders_become_modules_if_they_have_an_init_module(self): # Import ./a_package_folder/__init__.py from .a_package_folder import an_attribute self.assertEqual(1984, an_attribute) def test_subfolders_without_an_init_module_are_not_part_of_the_package(self): # Import ./a_normal_folder/ with self.assertRaises(ImportError): from a_normal_folder import Duck # ------------------------------------------------------------------ def test_use_absolute_imports_to_import_upper_level_modules(self): # Import /contemplate_koans.py import contemplate_koans self.assertEqual('contemplate_koans', contemplate_koans.__name__) # contemplate_koans.py is the root module in this package because its # the first python module called in koans. # # If contemplate_koan.py was based in a_package_folder that would be # the root folder, which would make reaching the koans folder # almost impossible. So always leave the starting python script in # a folder which can reach everything else. def test_import_a_module_in_a_subfolder_folder_using_an_absolute_path(self): # Import contemplate_koans.py/koans/a_package_folder/a_module.py from koans.a_package_folder.a_module import Duck self.assertEqual('koans.a_package_folder.a_module', Duck.__module__)
{ "content_hash": "1a63bcdaacdd1bac0dd908c18452984a", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 81, "avg_line_length": 35.535714285714285, "alnum_prop": 0.6422110552763819, "repo_name": "dewaka/python-koans", "id": "934abc34fb8916bf804ebb086dc52bc091b6e969", "size": "2162", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python3/koans/about_packages.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "325708" }, { "name": "Shell", "bytes": "1603" } ], "symlink_target": "" }
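# The koan above boils down to two rules as exercised in this project: a
# subfolder only counts as part of the package when it contains __init__.py,
# and absolute imports resolve from the directory of the top-level script.
# A tiny illustrative layout (hypothetical names):
#
#   main.py                      # run as: python main.py
#   mypackage/
#       __init__.py              # makes "mypackage" importable
#       helper.py                # defines greet()
#
# main.py:
#   from mypackage.helper import greet   # absolute import from the root
#
# mypackage/helper.py:
#   def greet():
#       return "hello"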
import os.path

import pdfkit
import requests

# Save chapters of "Automate the Boring Stuff" as PDFs using wkhtmltopdf.
base_url = r'https://automatetheboringstuff.com'
path = r'./wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=path)

with open('chapters.txt', 'r') as chapter_file:
    data = chapter_file.read().split('\n')

c = 1
print('Local Status\t\t', 'Server status\t', 'Final Status\t', 'File')
for i in data[1:-1]:  # ignore chapter 0
    j = i.split('[->]')
    if 'https' not in j[-1].strip():
        url = base_url + j[-1].strip()
        fname = str(c) + '.' + j[0].split('–')[-1].strip() + '.pdf'
        if not os.path.isfile(fname):
            r = requests.get(url)
            print('File not found\t\t', r.status_code, '\t\t', end='')
            pdfkit.from_url(url, fname, configuration=config)
        else:
            print('File found\t\t', '200\t\t', end='')
        c += 1
        print('Done\t\t', fname)
{ "content_hash": "9fa2b40a8e174490b6de0f01c60ece70", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 82, "avg_line_length": 31.70967741935484, "alnum_prop": 0.6103763987792472, "repo_name": "Vinay26k/python", "id": "0a8fd9938baff0a587df94cca8dd0ae29ebcb174", "size": "1011", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Projects/Automate with Python/GetContent.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "60031" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, \ with_statement import os import sys import hashlib import logging from shadowsocks.common import ord def create_obfs(method): return plain(method) obfs_map = { 'plain': (create_obfs,), 'origin': (create_obfs,), } class plain(object): def __init__(self, method): self.method = method self.server_info = None def init_data(self): return b'' def get_overhead(self, direction): # direction: true for c->s false for s->c return 0 def get_server_info(self): return self.server_info def set_server_info(self, server_info): self.server_info = server_info def client_pre_encrypt(self, buf): return buf def client_encode(self, buf): return buf def client_decode(self, buf): # (buffer_to_recv, is_need_to_encode_and_send_back) return (buf, False) def client_post_decrypt(self, buf): return buf def server_pre_encrypt(self, buf): return buf def server_encode(self, buf): return buf def server_decode(self, buf): # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back) return (buf, True, False) def server_post_decrypt(self, buf): return (buf, False) def client_udp_pre_encrypt(self, buf): return buf def client_udp_post_decrypt(self, buf): return buf def server_udp_pre_encrypt(self, buf): return buf def server_udp_post_decrypt(self, buf): return (buf, None) def dispose(self): pass def get_head_size(self, buf, def_value): if len(buf) < 2: return def_value head_type = ord(buf[0]) & 0x7 if head_type == 1: return 7 if head_type == 4: return 19 if head_type == 3: return 4 + ord(buf[1]) return def_value
{ "content_hash": "3ee0225106e8061dcfc0436a949b7d74", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 80, "avg_line_length": 22.295454545454547, "alnum_prop": 0.5861365953109072, "repo_name": "Cherrysaber/shadowsocksr", "id": "2b943ac2607ee0b5f4ab39d2419d3c336a85b2c1", "size": "2565", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "shadowsocks/obfsplugin/plain.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "365984" }, { "name": "Shell", "bytes": "459" } ], "symlink_target": "" }
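# Usage sketch for the pass-through obfuscation class above (assumes the module
# is importable as shadowsocks.obfsplugin.plain; the payload bytes are made up):
from shadowsocks.obfsplugin.plain import plain

obfs = plain('plain')
buf = b'\x03\x0bexample.com\x01\xbbpayload'
print(obfs.client_encode(buf) == buf)  # plain obfs leaves data untouched
print(obfs.server_decode(buf))         # (buf, True, False)
print(obfs.get_head_size(buf, 30))     # head type 3 -> 4 + len('example.com') = 15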
""" WSGI config for budget_tracker project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "budget_tracker.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
{ "content_hash": "351e1f368783a38eca7d8f55277a4063", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 78, "avg_line_length": 28.785714285714285, "alnum_prop": 0.7766749379652605, "repo_name": "Codeidea/budget-tracker", "id": "bfd63741045b8da0c9f78527cc3a47ab026426bc", "size": "403", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "budget_tracker/wsgi.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "570837" }, { "name": "HTML", "bytes": "4693800" }, { "name": "JavaScript", "bytes": "8631" }, { "name": "Python", "bytes": "20288" } ], "symlink_target": "" }
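# For a quick local check of the WSGI entry point above, the standard library's
# wsgiref server is enough (development only; a production deployment would sit
# behind a proper WSGI server):
from wsgiref.simple_server import make_server

from budget_tracker.wsgi import application

if __name__ == '__main__':
    make_server('127.0.0.1', 8000, application).serve_forever()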
""" The framework plugin. """ import json from cli.exceptions import CLIException from cli.mesos import get_frameworks from cli.plugins import PluginBase from cli.util import Table PLUGIN_NAME = "framework" PLUGIN_CLASS = "Framework" VERSION = "v0.1.0" SHORT_HELP = "Interacts with the Mesos Frameworks" class Framework(PluginBase): """ The framework plugin. """ COMMANDS = { "list": { "arguments": [], "flags": { "-a --all": "include inactive frameworks" }, "short_help": "List the Mesos frameworks.", "long_help": "List information about the Mesos frameworks." }, "inspect": { "arguments": ['<framework_id>'], "flags": {}, "short_help": "Return low-level information on the framework.", "long_help": "Return low-level information on the framework." } } def list(self, argv): """ Show a list of running frameworks """ try: master = self.config.master() except Exception as exception: raise CLIException("Unable to get leading master address: {error}" .format(error=exception)) data = get_frameworks(master, self.config) table = Table(["ID", "Active", "Hostname", "Name"]) for framework in data: if (not argv["--all"] and not framework["active"]): continue active = "False" if framework["active"]: active = "True" table.add_row([framework["id"], active, framework["hostname"], framework["name"]]) print(str(table)) def inspect(self, argv): """ Show the low-level information of the framework. """ try: master = self.config.master() except Exception as exception: raise CLIException("Unable to get leading master address: {error}" .format(error=exception)) data = get_frameworks(master, self.config) for framework in data: if framework["id"] != argv["<framework_id>"]: continue # remove not helpfull information framework.pop('tasks', None) framework.pop('unreachable_tasks', None) framework.pop('completed_tasks', None) print(json.dumps(framework, indent=4))
{ "content_hash": "b962f742c24641a76564f3229bc7f957", "timestamp": "", "source": "github", "line_count": 94, "max_line_length": 78, "avg_line_length": 27.148936170212767, "alnum_prop": 0.5270376175548589, "repo_name": "reneploetz/mesos", "id": "5ba04ef88a67cf1716bf2e72ef1cfe43fdf956e8", "size": "3337", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "src/python/cli_new/lib/cli/plugins/framework/main.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "8107" }, { "name": "C++", "bytes": "15379814" }, { "name": "CMake", "bytes": "110291" }, { "name": "CSS", "bytes": "8663" }, { "name": "Dockerfile", "bytes": "17479" }, { "name": "Groovy", "bytes": "1459" }, { "name": "HTML", "bytes": "101021" }, { "name": "Java", "bytes": "152539" }, { "name": "JavaScript", "bytes": "96892" }, { "name": "M4", "bytes": "203527" }, { "name": "Makefile", "bytes": "120855" }, { "name": "PowerShell", "bytes": "2547" }, { "name": "Python", "bytes": "363350" }, { "name": "Ruby", "bytes": "6234" }, { "name": "Shell", "bytes": "147287" } ], "symlink_target": "" }
from dart.engine.redshift.mappings import mapped_column_definition from dart.engine.redshift.metadata import RedshiftActionTypes def get_target_schema_and_table_name(action, dataset): schema_name = action.data.args.get('target_schema_name') or 'public' table_name = action.data.args.get('target_table_name') or dataset.data.table_name return schema_name, table_name def get_stage_schema_and_table_name(action, dataset): schema_name, table_name = get_target_schema_and_table_name(action, dataset) return 'dart_stage', schema_name + '_' + table_name + '_' + action.id def get_tracking_schema_and_table_name(action): table_name = 's3_files_for_action_%s' % action.id if action.data.action_type_name == RedshiftActionTypes.consume_subscription.name: table_name = 's3_files_for_subscription_%s' % action.data.args['subscription_id'] return 'dart_tracking', table_name def create_tracking_schema_and_table(conn, action): schema_name, table_name = get_tracking_schema_and_table_name(action) conn.execute("CREATE SCHEMA IF NOT EXISTS %s" % schema_name) sql = "CREATE TABLE IF NOT EXISTS %s.%s (s3_path VARCHAR(1024), updated TIMESTAMP)" % (schema_name, table_name) if action.data.action_type_name == RedshiftActionTypes.consume_subscription.name: sql = "CREATE TABLE IF NOT EXISTS %s.%s (s3_path VARCHAR(1024), updated TIMESTAMP, batch_id VARCHAR(36))" % (schema_name, table_name) conn.execute(sql) def create_schemas_and_tables(conn, action, dataset): """ :type action: dart.model.action.Action :type dataset: dart.model.dataset.Dataset """ args = action.data.args dd = dataset.data dist_key = args.get('target_distribution_key') or (dd.distribution_keys[0] if dd.distribution_keys else None) dist_style = 'KEY' if dist_key else (args.get('distribution_style') or 'EVEN') sk_keyword = 'INTERLEAVED SORTKEY' if args.get('sort_keys_interleaved') else 'SORTKEY' sort_keys = args.get('target_sort_keys') or dd.sort_keys schema_name, table_name = get_target_schema_and_table_name(action, dataset) table_sql = 'CREATE TABLE IF NOT EXISTS {schema_name}.{table_name}' table_options_sql = ' ({columns}{pk}){dist_style}{dist_key}{sort_keys}'.format( columns=', '.join([mapped_column_definition(c) for c in dataset.data.columns]), pk=', PRIMARY KEY (%s)' % ', '.join(dd.primary_keys) if dd.primary_keys else '', dist_style=' DISTSTYLE %s' % dist_style if dist_style else '', dist_key=' DISTKEY (%s)' % dist_key if dist_key else '', sort_keys=' %s (%s)' % (sk_keyword, ', '.join(sort_keys)) if sort_keys else '', ) # create the schema and target table conn.execute("CREATE SCHEMA IF NOT EXISTS %s" % schema_name) sql = table_sql.format(schema_name=schema_name, table_name=table_name) + table_options_sql conn.execute(sql) # create the schema and stage table schema_name, table_name = get_stage_schema_and_table_name(action, dataset) conn.execute("CREATE SCHEMA IF NOT EXISTS %s" % schema_name) sql = table_sql.format(schema_name=schema_name, table_name=table_name) + table_options_sql conn.execute(sql)
{ "content_hash": "aed73d0d24b935804f0054562df6d0f3", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 141, "avg_line_length": 50.74603174603175, "alnum_prop": 0.6912730685017203, "repo_name": "RetailMeNotSandbox/dart", "id": "082c6296437c6d7f6cfbd53114506c3096e45f99", "size": "3197", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/python/dart/engine/redshift/command/ddl.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "103727" }, { "name": "HTML", "bytes": "67636" }, { "name": "JavaScript", "bytes": "2762304" }, { "name": "Nginx", "bytes": "996" }, { "name": "PLpgSQL", "bytes": "1475" }, { "name": "Python", "bytes": "1025954" }, { "name": "Ruby", "bytes": "5523" }, { "name": "Shell", "bytes": "3100" } ], "symlink_target": "" }
from __future__ import division from django.conf import settings from django.db import models from django.db import transaction from django.db.models import Q from django.template import defaultfilters from django.utils import timezone from datetime import datetime from uchicagohvz.overwrite_fs import OverwriteFileSystemStorage from uchicagohvz.users.backend import UChicagoLDAPBackend from uchicagohvz.webhooks import * from mptt.models import MPTTModel, TreeForeignKey from ranking import Ranking import hashlib import os import random # Create your models here. def gen_rules_filename(instance, fn): return "rules/%s%s" % (instance.name, os.path.splitext(fn)[1]) def gen_pics_filename(instance, fn): return "pictures/%s%s" % (instance.picture.url, os.path.splitext(fn)[1]) class Dorm(models.Model): class Meta: ordering = ['name'] name = models.CharField(max_length=255) def __unicode__(self): return self.name class GameManager(models.Manager): def games_in_progress(self): now = timezone.now() return self.filter(start_date__lte=now, end_date__gte=now) class Game(models.Model): class Meta: ordering = ['-start_date'] name = models.CharField(max_length=255) registration_date = models.DateTimeField() start_date = models.DateTimeField() end_date = models.DateTimeField() dorms = models.ManyToManyField(Dorm) rules = models.FileField(upload_to=gen_rules_filename, storage=OverwriteFileSystemStorage()) picture = models.FileField(upload_to=gen_pics_filename, storage=OverwriteFileSystemStorage(), null=True, blank=True) color = models.CharField(max_length=64, default="#FFFFFF") flavor = models.TextField(max_length=6000, default="") objects = GameManager() def save(self, *args, **kwargs): webhook_send_command("!reset_roles") super(Game, self).save(*args, **kwargs) def __unicode__(self): return self.name def get_registered_players(self): return self.players.all() def get_active_players(self): return self.players.filter(active=True) @property def active_players_count(self): return self.get_active_players().count() def get_humans(self): return self.get_active_players().filter(human=True) def get_zombies(self): return self.get_active_players().filter(human=False) def get_players_in_dorm(self, dorm): return self.get_active_players().filter(dorm=dorm) def get_kills(self): """ Return all Kill objects for this game """ return Kill.objects.filter(killer__game=self) @property def status(self): if self.registration_date and self.start_date: now = timezone.now() if self.registration_date < now < self.start_date: return 'registration' elif self.start_date < now < self.end_date: return 'in_progress' elif now > self.end_date: return 'finished' else: return 'future' else: return 'N/A' @property def humans_listhost_address(self): return "%[email protected]" % defaultfilters.slugify(self.name) @property def zombies_listhost_address(self): return "%[email protected]" % defaultfilters.slugify(self.name) @models.permalink def get_absolute_url(self): return ('game|show', [self.pk]) class New_Squad(models.Model): class Meta: verbose_name = "Squad" verbose_name_plural = "Squads" unique_together = (('game', 'name')) game = models.ForeignKey(Game, related_name='new_squads') name = models.CharField(max_length=128) def __unicode__(self): return "%s" % (self.name) @models.permalink def get_absolute_url(self): return ('new_squad|show', [self.pk]) def get_active_players(self): return self.players.filter(active=True) def get_kills(self): return Kill.objects.exclude(parent=None).filter(killer__in=self.get_active_players()) @property def 
size(self): return self.get_active_players().count() @property def num_humans(self): return self.get_active_players().filter(human=True).count() @property def num_zombies(self): return self.get_active_players().filter(human=False).count() class Squad(models.Model): class Meta: verbose_name = "Old-style squad" verbose_name_plural = "Old-style squads" unique_together = (('game', 'name')) game = models.ForeignKey(Game, related_name='squads') name = models.CharField(max_length=128) def __unicode__(self): return "%s (%s)" % (self.name, self.game) @models.permalink def get_absolute_url(self): return ('squad|show', [self.pk]) def get_active_players(self): return self.players.filter(active=True) def get_kills(self): return Kill.objects.exclude(parent=None).filter(killer__in=self.get_active_players()) def get_awards(self): # returns a list of awards (with duplicates if won more than once) awards = [] sp = self.get_active_players() for aw in Award.objects.filter(players__in=self.get_active_players()).distinct(): awpl = aw.players.filter(pk__in=sp.values_list('pk', flat=True)) awards.append((aw, awpl)) return awards @property def size(self): return self.get_active_players().count() @property def num_humans(self): return self.get_active_players().filter(human=True).count() @property def num_zombies(self): return self.get_active_players().filter(human=False).count() @property def human_points(self): if self.get_active_players().count() > 0: return 10 * sum([p.human_points for p in self.get_active_players()]) / self.get_active_players().count() return 0 @property def zombie_points(self): if self.get_active_players().count() > 0: return 10 * sum([p.zombie_points for p in self.get_active_players()]) / self.get_active_players().count() return 0 @property def human_rank(self): from data_apis import top_human_squads ths = top_human_squads(self.game) squad_score = [x['human_points'] for x in ths if x['squad_id'] == self.id][0] scores = [x['human_points'] for x in ths] return (Ranking(scores, start=1).rank(squad_score), len(ths)) @property def zombie_rank(self): from data_apis import top_zombie_squads tzs = top_zombie_squads(self.game) squad_score = [x['zombie_points'] for x in tzs if x['squad_id'] == self.id][0] scores = [x['zombie_points'] for x in tzs] return (Ranking(scores, start=1).rank(squad_score), len(tzs)) NOUNS = open(os.path.join(settings.BASE_DIR, "game/word-lists/nouns.txt")).read().split('\n')[:-1] ADJECTIVES = open(os.path.join(settings.BASE_DIR, "game/word-lists/adjs.txt")).read().split('\n')[:-1] def gen_bite_code(): return random.choice(ADJECTIVES) + ' ' + random.choice(NOUNS) class Player(models.Model): class Meta: unique_together = (('user', 'game'), ('game', 'bite_code')) ordering = ['-game__start_date', 'user__username', 'user__last_name', 'user__first_name'] user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='+') game = models.ForeignKey(Game, related_name='players', help_text="To register a player for a game, a new Player record should be created. 
\ CHANGE THIS FOR EXISTING PLAYERS ONLY IF YOU KNOW WHAT YOU'RE DOING.") active = models.BooleanField(default=False) squad = models.ForeignKey(Squad, null=True, blank=True, related_name='players') new_squad = models.ForeignKey(New_Squad, null=True, blank=True, related_name='players') bite_code = models.CharField(max_length=255, blank=True, help_text='leave blank for automatic (re-)generation') dorm = models.ForeignKey(Dorm) major = models.CharField(max_length=255, blank=True, editable=settings.DEBUG, help_text='autopopulates from LDAP') human = models.BooleanField(default=True) opt_out_hvt = models.BooleanField(default=False) gun_requested = models.BooleanField(default=False) renting_gun = models.BooleanField(default=False) gun_returned = models.BooleanField(default=False) last_words = models.CharField(max_length=255, blank=True) lead_zombie = models.BooleanField(default=False) delinquent_gun = models.BooleanField(default=False) def save(self, *args, **kwargs): if self.game.status == 'registration' or not self.major: # allow updates to major during registration backend = UChicagoLDAPBackend() self.major = backend.get_user_major(self.user.username) if not self.bite_code: # (re-)generate unique bite code while True: bc = gen_bite_code() if not (Player.objects.filter(game=self.game, bite_code=bc).exists() or Award.objects.filter(game=self.game, code=bc).exists()): self.bite_code = bc break old = Player.objects.get(id=self.id) if self.id else None if (old and (not old.active) and self.active): # if user was newly activated profile = self.user.profile profile.subscribe_zombies_listhost = True # force subscription to zombies listhost profile.save() past_players = Player.objects.filter(user=self.user).exclude(game=self.game) if past_players and (not past_players[0].gun_returned) and past_players[0].renting_gun: self.delinquent_gun = True if self.user.profile.discord_tag != "": webhook_send_command("!register_player %s %d" %(self.user.profile.discord_tag, self.human)) super(Player, self).save(*args, **kwargs) @property def kill_object(self): kills = Kill.objects.exclude(parent=None).filter(victim=self).order_by('-date') if kills.exists(): return kills[0] return None @property def killed_by(self): ko = self.kill_object if ko: return ko.killer return None @property def time_of_death(self): if self.human == False and self.kill_object: return self.kill_object.date return None @property def lifespan(self): if self.game.status == 'in_progress': end_time = timezone.now() elif self.game.status == 'finished': end_time = self.game.end_date else: return None if not self.human: end_time = self.time_of_death return end_time - self.game.start_date @property def kills(self): return Kill.objects.filter(killer=self).exclude(victim__id=self.id).order_by('-date') @property def unannotated_kills(self): return Kill.objects.exclude(killer=self, victim=self).filter(killer=self).filter(Q(lat__isnull=True) | Q(lng__isnull=True) | Q(notes=u'')) @transaction.atomic def kill_me(self, killer): if not self.human: return None parent_kills = Kill.objects.filter(victim=killer).order_by('-date') if parent_kills.exists(): parent_kill = parent_kills[0] else: parent_kill = None points = 0 now = timezone.now() try: hvt = HighValueTarget.objects.get(player=self, start_date__lte=now, end_date__gte=now) except HighValueTarget.DoesNotExist: hvt = None else: points += hvt.kill_points try: hvd = HighValueDorm.objects.get(game=self.game, dorm=self.dorm, start_date__lte=now, end_date__gte=now) except HighValueDorm.DoesNotExist: hvd = None 
else: points += hvd.points if not (hvt or hvd): points = settings.HUMAN_KILL_POINTS return Kill.objects.create(parent=parent_kill, killer=killer, victim=self, points=points, date=now, hvt=hvt, hvd=hvd, notes='') @property def display_name(self): # real name when game is over; otherwise, dorm + obfuscated code for humans and bite code for zombies name = '' if self.game.status == 'in_progress': if self.human: name = "%s %s" % (self.dorm.name, hashlib.sha256(self.bite_code).hexdigest()[:2].upper()) else: name = self.bite_code else: name = self.user.get_full_name() if not name: name = "CONTACT A MODERATOR" if self.squad: name = "%s [%s]" % (name, self.squad.name) elif self.new_squad: name = "%s [%s]" % (name, self.new_squad.name) return name @property def human_points(self): hvt_points = 0 if self.human: try: hvt_points = self.hvt.award_points except HighValueTarget.DoesNotExist: hvt_points = 0 return (self.awards.filter(redeem_type__in=('H', 'A')).aggregate(points=models.Sum('points'))['points'] or 0) + hvt_points @property def zombie_points(self): kill_points = Kill.objects.exclude(parent=None, killer=self, victim=self).filter(killer=self).aggregate(points=models.Sum('points'))['points'] or 0 award_points = self.awards.filter(redeem_type__in=('Z', 'A')).aggregate(points=models.Sum('points'))['points'] or 0 return kill_points + award_points @property def human_rank(self): from data_apis import top_humans th = top_humans(self.game) try: player_score = [x['human_points'] for x in th if x['player_id'] == self.id][0] except IndexError: player_score = 0 scores = [x['human_points'] for x in th] return (Ranking(scores, start=1).rank(player_score), len(th)) @property def zombie_rank(self): from data_apis import top_zombies tz = top_zombies(self.game) try: player_score = [x['zombie_points'] for x in tz if x['player_id'] == self.id][0] except IndexError: return None scores = [x['zombie_points'] for x in tz] return (Ranking(scores, start=1).rank(player_score), len(tz)) def __unicode__(self): return "%s [%s]" % (self.user.get_full_name(), self.game) @models.permalink def get_absolute_url(self): return ('player|show', [self.pk]) class MissionPicture(models.Model): players = models.ManyToManyField(Player, related_name='pictures', blank=True, help_text='Players in this picture.') game = models.ForeignKey(Game, related_name="pictures") picture = models.FileField(upload_to=gen_pics_filename, storage=OverwriteFileSystemStorage()) lat = models.FloatField(null=True, blank=True, verbose_name='latitude') lng = models.FloatField(null=True, blank=True, verbose_name='longitude') def __unicode__(self): name = "" for p in self.players.all(): name += p.user.get_full_name() + " " name += self.game.name return name @property def geotagged(self): return self.lat and self.lng @models.permalink def get_absolute_url(self): return ('mission_picture|show', [self.pk]) class Kill(MPTTModel): class Meta: ordering = ['-date'] unique_together = ('parent', 'killer', 'victim') class MPTTMeta: order_insertion_by = ['date'] parent = TreeForeignKey('self', null=True, blank=True, related_name='children', editable=False) killer = models.ForeignKey(Player, related_name="+") victim = models.ForeignKey(Player, related_name="+") date = models.DateTimeField(default=timezone.now) points = models.IntegerField(default=settings.HUMAN_KILL_POINTS) hvd = models.ForeignKey('game.HighValueDorm', verbose_name='High-value Dorm', null=True, blank=True, related_name='kills', on_delete=models.SET_NULL) hvt = models.OneToOneField('game.HighValueTarget', 
verbose_name='High-value target', null=True, blank=True, related_name='kill', on_delete=models.SET_NULL) notes = models.TextField(blank=True) lat = models.FloatField(null=True, blank=True, verbose_name='latitude') lng = models.FloatField(null=True, blank=True, verbose_name='longitude') def __unicode__(self): return "%s (%s) --> %s (%s) [%s]" % (self.killer.user.get_full_name(), self.killer.user.username, self.victim.user.get_full_name(), self.victim.user.username, self.killer.game.name) @property def game(self): return self.killer.game @property def geotagged(self): return self.lat and self.lng @models.permalink def get_absolute_url(self): return ('kill|show', [self.pk]) def refresh_points(self): """ Update the number of points the kill is worth, taking into account HVT and HVD """ points = 0 if self.hvt: points += self.hvt.kill_points if self.hvd: points += self.hvd.points if not (self.hvd or self.hvt): points = settings.HUMAN_KILL_POINTS self.points = points def save(self, *args, **kwargs): if self.killer.game != self.victim.game: raise Exception('killer.game and victim.game do not match.') try: parent = Kill.objects.exclude(id=self.id).filter(victim=self.killer)[0] except: parent = None self.parent = parent victim = self.victim victim.human = False victim.save() self.refresh_points() super(Kill, self).save(*args, **kwargs) REDEEM_TYPES = ( ('H', 'Humans only'), ('Z', 'Zombies only'), ('A', 'All players'), ) class Award(models.Model): class Meta: unique_together = (('game', 'name'), ('game', 'code')) game = models.ForeignKey(Game, related_name='+') name = models.CharField(max_length=255) points = models.FloatField(help_text='Can be negative, e.g. to penalize players') players = models.ManyToManyField(Player, related_name='awards', blank=True, help_text='Players that should receive this award.') code = models.CharField(max_length=255, blank=True, help_text='leave blank for automatic (re-)generation') redeem_limit = models.IntegerField( help_text='Maximum number of players that can redeem award via code entry (set to 0 for awards to be added by moderators only)' ) redeem_type = models.CharField(max_length=1, choices=REDEEM_TYPES) def __unicode__(self): return "%s (%s)" % (self.name, self.game.name) def save(self, *args, **kwargs): if not self.code: while True: code = gen_bite_code() if not (Award.objects.filter(game=self.game, code=code).exists() or Player.objects.filter(game=self.game, bite_code=code).exists()): self.code = code break super(Award, self).save(*args, **kwargs) class HighValueTarget(models.Model): player = models.OneToOneField(Player, unique=True, related_name='hvt') start_date = models.DateTimeField() end_date = models.DateTimeField() kill_points = models.IntegerField(default=settings.HVT_KILL_POINTS, help_text='# of points zombies receive for killing this HVT') award_points = models.IntegerField(default=settings.HVT_AWARD_POINTS, help_text='# of points the HVT earns if he/she survives for the entire duration') def __unicode__(self): return "%s" % (self.player) def expired(self): return timezone.now() > self.end_date def save(self, *args, **kwargs): super(HighValueTarget, self).save(*args, **kwargs) try: kill = self.kill except Kill.DoesNotExist: return else: kill.refresh_points() kill.save() class HighValueDorm(models.Model): class Meta: unique_together = ('game', 'dorm') game = models.ForeignKey(Game) dorm = models.ForeignKey(Dorm) start_date = models.DateTimeField() end_date = models.DateTimeField() points = models.IntegerField(default=settings.HVD_KILL_POINTS) def 
__unicode__(self): return "%s (%s)" % (self.dorm.name, self.game.name) def save(self, *args, **kwargs): super(HighValueDorm, self).save(*args, **kwargs) from uchicagohvz.game.tasks import refresh_kill_points refresh_kill_points.delay(self.game.id) class Mission(models.Model): class Meta: unique_together = ('game', 'name') game = models.ForeignKey(Game, related_name='missions') name = models.CharField(max_length=63) awards = models.ManyToManyField(Award, related_name='missions', blank=True, help_text='Awards associated with this mission.') description = models.CharField(max_length=255) summary = models.TextField(max_length=6000, default="") zombies_win = models.BooleanField(default=False) #because mods hate zombies :P def __unicode__(self): return "%s (%s)" % (self.name, self.game.name) def save(self, *args, **kwargs): super(Mission, self).save(*args, **kwargs) def mission_attendance(self, *args, **kwargs): attendees = [] for award in self.awards.all(): for player in award.players.all(): if player not in attendees: attendees.append(player) return len(attendees) class MinecraftUser(models.Model): player = models.OneToOneField(Player, unique=True, related_name='minecraft_user') human_score = models.IntegerField(default=0) zombie_score = models.IntegerField(default=0) player_uuid = models.CharField(max_length=255) def save(self, *args, **kwargs): super(MinecraftUser, self).save(*args, **kwargs) from . import signals
{ "content_hash": "ca297391dcee299cf4ef9b1fe4a8e58d", "timestamp": "", "source": "github", "line_count": 613, "max_line_length": 183, "avg_line_length": 32.20554649265905, "alnum_prop": 0.703424171816432, "repo_name": "kz26/uchicago-hvz", "id": "e9539ec213c15342ae924701f100018c32bce7b3", "size": "19742", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "uchicagohvz/game/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2359" }, { "name": "CoffeeScript", "bytes": "2335" }, { "name": "HTML", "bytes": "545427" }, { "name": "JavaScript", "bytes": "1" }, { "name": "Python", "bytes": "126936" } ], "symlink_target": "" }
import numpy as np import unittest import GPyOpt from GPyOpt.util.general import samples_multidimensional_uniform from base_test_case import BaseTestCase class TestInputWarpedGP(BaseTestCase): ''' Unittest for the InputWarpedGP functions ''' def setUp(self): ## # -- methods configuration ## model_type = 'input_warped_GP' initial_design_numdata = None initial_design_type = 'random' acquisition_type = 'EI' normalize_Y = True exact_feval = True acquisition_optimizer_type = 'lbfgs' model_update_interval = 1 evaluator_type = 'sequential' batch_size = 1 num_cores = 1 verbosity = False # stop conditions max_iter = 15 max_time = 999 eps = 1e-8 self.methods_configs = [ { 'name': 'input_warped_GP', 'model_type' : model_type, 'initial_design_numdata' : initial_design_numdata, 'initial_design_type' : initial_design_type, 'acquisition_type' : acquisition_type, 'normalize_Y' : normalize_Y, 'exact_feval' : exact_feval, 'acquisition_optimizer_type' : acquisition_optimizer_type, 'model_update_interval' : model_update_interval, 'verbosity' : verbosity, 'evaluator_type' : evaluator_type, 'batch_size' : batch_size, 'num_cores' : num_cores, 'max_iter' : max_iter, 'max_time' : max_time, 'eps' : eps } ] # -- Problem setup np.random.seed(1) n_inital_design = 5 input_dim = 5 self.problem_config = { 'objective': GPyOpt.objective_examples.experimentsNd.alpine1(input_dim = input_dim).f, 'domain': [{'name': 'var1_2', 'type': 'continuous', 'domain': (-10,10),'dimensionality': 2}, {'name': 'var3', 'type': 'continuous', 'domain': (-8,3)}, {'name': 'var4', 'type': 'discrete', 'domain': (-2,0,2)}, {'name': 'var5', 'type': 'discrete', 'domain': (-1,5)}], 'constraints': None, 'cost_withGradients': None} feasible_region = GPyOpt.Design_space(space = self.problem_config['domain'], constraints = self.problem_config['constraints']) self.f_inits = GPyOpt.experiment_design.initial_design('random', feasible_region, 5) self.f_inits = self.f_inits.reshape(n_inital_design, input_dim) def test_run(self): self.check_configs() if __name__ == '__main__': unittest.main()
{ "content_hash": "0cd54e29aa3c0177a0befd4601e87e85", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 134, "avg_line_length": 39.674698795180724, "alnum_prop": 0.44397206194959005, "repo_name": "SheffieldML/GPyOpt", "id": "2691524f18ff42446f75534ddfddd0135f49044d", "size": "3395", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GPyOpt/testing/functional_tests/test_input_warped_gp.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "1191064" }, { "name": "Python", "bytes": "368322" } ], "symlink_target": "" }
import kfp import json import copy from kfp import components from kfp import dsl from kfp.aws import use_aws_secret sagemaker_workteam_op = components.load_component_from_file('../../../../components/aws/sagemaker/workteam/component.yaml') sagemaker_gt_op = components.load_component_from_file('../../../../components/aws/sagemaker/ground_truth/component.yaml') sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml') channelObjList = [] channelObj = { 'ChannelName': '', 'DataSource': { 'S3DataSource': { 'S3Uri': '', 'S3DataType': 'AugmentedManifestFile', 'S3DataDistributionType': 'FullyReplicated', 'AttributeNames': ['source-ref', 'category'] } }, 'ContentType': 'application/x-recordio', 'CompressionType': 'None', 'RecordWrapperType': 'RecordIO' } @dsl.pipeline( name='Ground Truth image classification test pipeline', description='SageMaker Ground Truth job test' ) def ground_truth_test(region='us-west-2', team_name='ground-truth-demo-team', team_description='Team for mini image classification labeling job', user_pool='', user_groups='', client_id='', ground_truth_train_job_name='mini-image-classification-demo-train', ground_truth_validation_job_name='mini-image-classification-demo-validation', ground_truth_label_attribute_name='category', ground_truth_train_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/train.manifest', ground_truth_validation_manifest_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/validation.manifest', ground_truth_output_location='s3://your-bucket-name/mini-image-classification/ground-truth-demo/output', ground_truth_task_type='image classification', ground_truth_worker_type='private', ground_truth_label_category_config='s3://your-bucket-name/mini-image-classification/ground-truth-demo/class_labels.json', ground_truth_ui_template='s3://your-bucket-name/mini-image-classification/ground-truth-demo/instructions.template', ground_truth_title='Mini image classification', ground_truth_description='Test for Ground Truth KFP component', ground_truth_num_workers_per_object=1, ground_truth_time_limit=30, ground_truth_task_availibility=3600, ground_truth_max_concurrent_tasks=20, training_algorithm_name='image classification', training_input_mode='Pipe', training_hyperparameters={"num_classes": "2", "num_training_samples": "14", "mini_batch_size": "2"}, training_output_location='s3://your-bucket-name/mini-image-classification/training-output', training_instance_type='ml.p2.xlarge', training_instance_count=1, training_volume_size=50, training_max_run_time=3600, role_arn='' ): workteam = sagemaker_workteam_op( region=region, team_name=team_name, description=team_description, user_pool=user_pool, user_groups=user_groups, client_id=client_id ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) ground_truth_train = sagemaker_gt_op( region=region, role=role_arn, job_name=ground_truth_train_job_name, label_attribute_name=ground_truth_label_attribute_name, manifest_location=ground_truth_train_manifest_location, output_location=ground_truth_output_location, task_type=ground_truth_task_type, worker_type=ground_truth_worker_type, workteam_arn=workteam.output, label_category_config=ground_truth_label_category_config, ui_template=ground_truth_ui_template, title=ground_truth_title, description=ground_truth_description, num_workers_per_object=ground_truth_num_workers_per_object, time_limit=ground_truth_time_limit, 
task_availibility=ground_truth_task_availibility, max_concurrent_tasks=ground_truth_max_concurrent_tasks ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) ground_truth_validation = sagemaker_gt_op( region=region, role=role_arn, job_name=ground_truth_validation_job_name, label_attribute_name=ground_truth_label_attribute_name, manifest_location=ground_truth_validation_manifest_location, output_location=ground_truth_output_location, task_type=ground_truth_task_type, worker_type=ground_truth_worker_type, workteam_arn=workteam.output, label_category_config=ground_truth_label_category_config, ui_template=ground_truth_ui_template, title=ground_truth_title, description=ground_truth_description, num_workers_per_object=ground_truth_num_workers_per_object, time_limit=ground_truth_time_limit, task_availibility=ground_truth_task_availibility, max_concurrent_tasks=ground_truth_max_concurrent_tasks ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) channelObj['ChannelName'] = 'train' channelObj['DataSource']['S3DataSource']['S3Uri'] = str(ground_truth_train.outputs['output_manifest_location']) channelObjList.append(copy.deepcopy(channelObj)) channelObj['ChannelName'] = 'validation' channelObj['DataSource']['S3DataSource']['S3Uri'] = str(ground_truth_validation.outputs['output_manifest_location']) channelObjList.append(copy.deepcopy(channelObj)) training = sagemaker_train_op( region=region, algorithm_name=training_algorithm_name, training_input_mode=training_input_mode, hyperparameters=training_hyperparameters, channels=json.dumps(channelObjList), instance_type=training_instance_type, instance_count=training_instance_count, volume_size=training_volume_size, max_run_time=training_max_run_time, model_artifact_path=training_output_location, role=role_arn ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')) if __name__ == '__main__': kfp.compiler.Compiler().compile(ground_truth_test, __file__ + '.zip')
{ "content_hash": "98c6fff232c13269044e7a5ca5200866", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 134, "avg_line_length": 45.210144927536234, "alnum_prop": 0.6986696585991344, "repo_name": "kubeflow/kfp-tekton-backend", "id": "cba865328f65b4385a5ee9df3132febe4c035ba7", "size": "6263", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "samples/contrib/aws-samples/ground_truth_pipeline_demo/mini-image-classification-pipeline.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "47293" }, { "name": "Go", "bytes": "1269081" }, { "name": "HTML", "bytes": "3584" }, { "name": "JavaScript", "bytes": "24828" }, { "name": "Jupyter Notebook", "bytes": "177616" }, { "name": "Makefile", "bytes": "9694" }, { "name": "PowerShell", "bytes": "3194" }, { "name": "Python", "bytes": "1628570" }, { "name": "Scala", "bytes": "13000" }, { "name": "Shell", "bytes": "180020" }, { "name": "Smarty", "bytes": "7694" }, { "name": "Starlark", "bytes": "76037" }, { "name": "TypeScript", "bytes": "1641150" } ], "symlink_target": "" }
import mock from oslo_serialization import jsonutils from requests import Response from vbclient.common import display from vbclient.common import manager from vbclient.common import resource as r from vbclient.common import utils from vbclient.v1 import job_mgr from vbclient.v2 import backup_mgr # fake request id from vbclient.v2 import restore_mgr FAKE_REQUEST_ID = 'req-0594c66b-6973-405c-ae2c-43fcfc00f2e3' # fake resource id FAKE_RESOURCE_ID = '0594c66b-6973-405c-ae2c-43fcfc00f2e3' FAKE_RESOURCE_NAME = 'name-0594c66b-6973-405c-ae2c-43fcfc00f2e3' # fake resource response key FAKE_RESOURCE_ITEM_URL = '/resources/%s' FAKE_RESOURCE_COLLECTION_URL = '/resources' def create_response(json=None): resp = Response() resp.headers['x-openstack-request-id'] = FAKE_REQUEST_ID if json: resp.json = mock.MagicMock() resp.json.return_value = json return resp def create_response_with_compute_header(): resp = Response() resp.headers['x-compute-request-id'] = FAKE_REQUEST_ID return resp class FakeResource(r.Resource, display.Display): pass class FakeManager(manager.Manager): resource_class = FakeResource def __init__(self, http_client=None): super(FakeManager, self).__init__(http_client) def get(self, resource, **kwargs): resource_url = FAKE_RESOURCE_ITEM_URL % utils.get_id(resource) return self._get(resource_url, **kwargs) def list(self): return self._list(FAKE_RESOURCE_COLLECTION_URL, key='resources') def update(self, resource, **kwargs): return self._update(FAKE_RESOURCE_ITEM_URL % utils.get_id(resource), resource, **kwargs) def update_all(self, resource, **kwargs): resource_url = FAKE_RESOURCE_ITEM_URL % utils.get_id(resource) return self._update_all(resource_url, resource, **kwargs) def create(self, resource=None): return self._create(FAKE_RESOURCE_COLLECTION_URL, json=resource) def delete(self, resource): return self._delete(FAKE_RESOURCE_ITEM_URL % utils.get_id(resource)) class FakeRaw(object): version = 110 class FakeHTTPResponse(object): version = 1.1 def __init__(self, status_code, reason, headers, content): self.headers = headers self.content = content self.status_code = status_code self.reason = reason self.raw = FakeRaw() def getheader(self, name, default=None): return self.headers.get(name, default) def getheaders(self): return self.headers.items() def read(self, amt=None): b = self.content self.content = None return b def iter_content(self, chunksize): return self.content def json(self): return jsonutils.loads(self.content) class FakeVolumeBackupClient(object): def __init__(self, **kwargs): self.fake_http_client = mock.Mock() self.backup_mgr = backup_mgr.VolumeBackupManager(self.fake_http_client) self.restore_mgr = restore_mgr.VolumeBackupRestoreManager( self.fake_http_client ) self.job_mgr = job_mgr.JobManager(self.fake_http_client)
{ "content_hash": "db063791e03da605e7ac4a2c96d4a5c9", "timestamp": "", "source": "github", "line_count": 114, "max_line_length": 79, "avg_line_length": 28.12280701754386, "alnum_prop": 0.670617592014972, "repo_name": "Huawei/OpenStackClient_VBS", "id": "abad1233096688ceb2b98f76ab6d786557567f83", "size": "3818", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vbclient/tests/fakes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "82795" } ], "symlink_target": "" }
def main():
    # User inputs her first name, last name, mother's maiden name, and city name
    firstName = input("Please enter your First name: ")
    lastName = input("Please enter your Last name: ")
    madName = input("Please enter your mother's maiden name: ")
    city = input("Please enter your City name: ")

    # Program prints the Star Wars name
    print("Your Starwars First name is:", firstName[0:3] + lastName[0:2])
    print("Your Starwars last name is:", madName[0:3] + city[0:2])

main()
{ "content_hash": "e5d69b906f82a436030a374d87d7a48b", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 78, "avg_line_length": 31.3125, "alnum_prop": 0.6307385229540918, "repo_name": "cynthiacarter/Week-Three-Assignment", "id": "3c57e2535072c78ad493c613d0025d6a359e398b", "size": "634", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "StarWarsName.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "894" } ], "symlink_target": "" }
""" Starts a service to scan in intervals for new devices. Will emit EVENT_PLATFORM_DISCOVERED whenever a new service has been discovered. Knows which components handle certain types, will make sure they are loaded before the EVENT_PLATFORM_DISCOVERED is fired. """ import logging import threading import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import EVENT_HOMEASSISTANT_START from homeassistant.helpers.discovery import load_platform, discover REQUIREMENTS = ['netdisco==0.8.3'] DOMAIN = 'discovery' SCAN_INTERVAL = 300 # seconds SERVICE_NETGEAR = 'netgear_router' SERVICE_WEMO = 'belkin_wemo' SERVICE_HASS_IOS_APP = 'hass_ios' SERVICE_HANDLERS = { SERVICE_HASS_IOS_APP: ('ios', None), SERVICE_NETGEAR: ('device_tracker', None), SERVICE_WEMO: ('wemo', None), 'philips_hue': ('light', 'hue'), 'google_cast': ('media_player', 'cast'), 'panasonic_viera': ('media_player', 'panasonic_viera'), 'plex_mediaserver': ('media_player', 'plex'), 'roku': ('media_player', 'roku'), 'sonos': ('media_player', 'sonos'), 'yamaha': ('media_player', 'yamaha'), 'logitech_mediaserver': ('media_player', 'squeezebox'), 'directv': ('media_player', 'directv'), 'denonavr': ('media_player', 'denonavr'), 'samsung_tv': ('media_player', 'samsungtv'), 'yeelight': ('light', 'yeelight'), 'flux_led': ('light', 'flux_led'), 'apple_tv': ('media_player', 'apple_tv'), 'openhome': ('media_player', 'openhome'), } CONF_IGNORE = 'ignore' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Optional(CONF_IGNORE, default=[]): vol.All(cv.ensure_list, [vol.In(SERVICE_HANDLERS)]) }), }, extra=vol.ALLOW_EXTRA) def setup(hass, config): """Start a discovery service.""" logger = logging.getLogger(__name__) from netdisco.service import DiscoveryService # Disable zeroconf logging, it spams logging.getLogger('zeroconf').setLevel(logging.CRITICAL) # Platforms ignore by config ignored_platforms = config[DOMAIN][CONF_IGNORE] lock = threading.Lock() def new_service_listener(service, info): """Called when a new service is found.""" if service in ignored_platforms: logger.info("Ignoring service: %s %s", service, info) return with lock: logger.info("Found new service: %s %s", service, info) comp_plat = SERVICE_HANDLERS.get(service) # We do not know how to handle this service. if not comp_plat: return component, platform = comp_plat if platform is None: discover(hass, service, info, component, config) else: load_platform(hass, component, platform, info, config) # pylint: disable=unused-argument def start_discovery(event): """Start discovering.""" netdisco = DiscoveryService(SCAN_INTERVAL) netdisco.add_listener(new_service_listener) netdisco.start() hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_discovery) return True
{ "content_hash": "6fae674548a287c68b6bd26d6122c9b1", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 79, "avg_line_length": 30.54368932038835, "alnum_prop": 0.6443102352193262, "repo_name": "Duoxilian/home-assistant", "id": "b8999ee2c43b739c3302f252e345f4bba1333836", "size": "3146", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/discovery.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1584258" }, { "name": "Python", "bytes": "5414513" }, { "name": "Ruby", "bytes": "379" }, { "name": "Shell", "bytes": "14220" } ], "symlink_target": "" }
import random
import time


class User:
    def __init__(self):
        self.identifier = str(random.randrange(2**64))
        self.timestamp = time.time()
{ "content_hash": "ff8951d38e46cf071f7accb9c66b6658", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 54, "avg_line_length": 25.5, "alnum_prop": 0.6405228758169934, "repo_name": "analogbit/barbatus", "id": "c6683b3aba413c7d9bee5dfd4372dda2185d2e16", "size": "153", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "User.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2074" } ], "symlink_target": "" }
"""Contains the definition for inception v2 classification network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nets import inception_utils slim = tf.contrib.slim trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) def inception_v2_base(inputs, final_endpoint='Mixed_5c', min_depth=16, depth_multiplier=1.0, use_separable_conv=True, data_format='NHWC', scope=None): """Inception v2 (6a2). Constructs an Inception v2 network from inputs to the given final endpoint. This method can construct the network up to the layer inception(5b) as described in http://arxiv.org/abs/1502.03167. Args: inputs: a tensor of shape [batch_size, height, width, channels]. final_endpoint: specifies the endpoint to construct the network up to. It can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c']. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. use_separable_conv: Use a separable convolution for the first layer Conv2d_1a_7x7. If this is False, use a normal convolution instead. data_format: Data format of the activations ('NHWC' or 'NCHW'). scope: Optional variable_scope. Returns: tensor_out: output tensor corresponding to the final_endpoint. end_points: a set of activations for external use, for example summaries or losses. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ # end_points will collect relevant activations for external use, for example # summaries or losses. end_points = {} # Used to find thinned depths for each layer. if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') depth = lambda d: max(int(d * depth_multiplier), min_depth) if data_format != 'NHWC' and data_format != 'NCHW': raise ValueError('data_format must be either NHWC or NCHW.') if data_format == 'NCHW' and use_separable_conv: raise ValueError( 'separable convolution only supports NHWC layout. NCHW data format can' ' only be used when use_separable_conv is False.' ) concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope(scope, 'InceptionV2', [inputs]): with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): # Note that sizes in the comments below assume an input spatial size of # 224x224, however, the inputs can be of any size greater 32x32. # 224 x 224 x 3 end_point = 'Conv2d_1a_7x7' if use_separable_conv: # depthwise_multiplier here is different from depth_multiplier. # depthwise_multiplier determines the output channels of the initial # depthwise conv (see docs for tf.nn.separable_conv2d), while # depth_multiplier controls the # channels of the subsequent 1x1 # convolution. Must have # in_channels * depthwise_multipler <= out_channels # so that the separable convolution is not overparameterized. 
depthwise_multiplier = min(int(depth(64) / 3), 8) net = slim.separable_conv2d( inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, padding='SAME', weights_initializer=trunc_normal(1.0), scope=end_point) else: # Use a normal convolution instead of a separable convolution. net = slim.conv2d( inputs, depth(64), [7, 7], stride=2, weights_initializer=trunc_normal(1.0), scope=end_point) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 112 x 112 x 64 end_point = 'MaxPool_2a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 64 end_point = 'Conv2d_2b_1x1' net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1)) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 64 end_point = 'Conv2d_2c_3x3' net = slim.conv2d(net, depth(192), [3, 3], scope=end_point) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 192 end_point = 'MaxPool_3a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 192 # Inception module. end_point = 'Mixed_3b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 256 end_point = 'Mixed_3c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 320 end_point = 'Mixed_4a' with tf.variable_scope(end_point): with 
tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d( net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4d' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = 
slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4e' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_5a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 7 x 7 x 1024 end_point = 'Mixed_5b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: 
return net, end_points # 7 x 7 x 1024 end_point = 'Mixed_5c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points raise ValueError('Unknown final endpoint %s' % final_endpoint) def inception_v2(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV2'): """Inception v2 model for classification. Constructs an Inception v2 network for classification as described in http://arxiv.org/abs/1502.03167. The default image size used to train this network is 224x224. Args: inputs: a tensor of shape [batch_size, height, width, channels]. num_classes: number of predicted classes. is_training: whether is training or not. dropout_keep_prob: the percentage of activation values that are retained. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. Returns: logits: the pre-softmax activations, a tensor of size [batch_size, num_classes] end_points: a dictionary from components of the network to the corresponding activation. 
Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') # Final pooling and prediction with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope: with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): net, end_points = inception_v2_base( inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier) with tf.variable_scope('Logits'): kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7]) net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size)) # 1 x 1 x 1024 net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b') logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1') if spatial_squeeze: logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze') end_points['Logits'] = logits end_points['Predictions'] = prediction_fn(logits, scope='Predictions') return logits, end_points inception_v2.default_image_size = 224 def _reduced_kernel_size_for_small_input(input_tensor, kernel_size): """Define kernel size which is automatically reduced for small input. If the shape of the input images is unknown at graph construction time this function assumes that the input images are is large enough. Args: input_tensor: input tensor of size [batch_size, height, width, channels]. kernel_size: desired kernel size of length 2: [kernel_height, kernel_width] Returns: a tensor with the kernel size. TODO(jrru): Make this function work with unknown shapes. Theoretically, this can be done with the code below. Problems are two-fold: (1) If the shape was known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot handle tensors that define the kernel size. shape = tf.shape(input_tensor) return = tf.stack([tf.minimum(shape[1], kernel_size[0]), tf.minimum(shape[2], kernel_size[1])]) """ shape = input_tensor.get_shape().as_list() if shape[1] is None or shape[2] is None: kernel_size_out = kernel_size else: kernel_size_out = [min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])] return kernel_size_out inception_v2_arg_scope = inception_utils.inception_arg_scope
{ "content_hash": "81470dad71959358372d7b02bbdd3585", "timestamp": "", "source": "github", "line_count": 544, "max_line_length": 90, "avg_line_length": 51.123161764705884, "alnum_prop": 0.4940850742511956, "repo_name": "fisheess/modular_SSD_tensorflow", "id": "0bb2832695acd7d0cfdf6ee22cb8d968a979a8f7", "size": "28496", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nets/inception_v2.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "513015" } ], "symlink_target": "" }
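The _reduced_kernel_size_for_small_input helper above simply clips the global-pooling kernel against a statically known feature-map size. A dependency-free sketch of that clipping rule, with plain lists standing in for a TensorFlow shape; the example shapes are illustrative (per the "7 x 7 x 1024" comment above, the default 224x224 input reaches the final pooling as a 7x7 map).

# Sketch of the kernel clipping done by _reduced_kernel_size_for_small_input,
# without a TensorFlow dependency. Shapes follow [batch, height, width, channels];
# None means "unknown at graph construction time".
def reduced_kernel_size(shape, kernel_size):
    if shape[1] is None or shape[2] is None:
        return kernel_size                       # unknown size: keep the requested kernel
    return [min(shape[1], kernel_size[0]),       # clip height
            min(shape[2], kernel_size[1])]       # clip width

print(reduced_kernel_size([None, 7, 7, 1024], [7, 7]))        # [7, 7]  (224x224 input)
print(reduced_kernel_size([None, 5, 5, 1024], [7, 7]))        # [5, 5]  (smaller input)
print(reduced_kernel_size([None, None, None, 1024], [7, 7]))  # [7, 7]  (dynamic shape)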
from __future__ import print_function
from future.utils import viewitems

import itertools
import csv
import exampleIO
import dedupe
import os
import time
import random
import optparse
import logging
from collections import defaultdict

optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
                help='Increase verbosity (specify multiple times for more)')
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose:
    if opts.verbose == 1:
        log_level = logging.INFO
    elif opts.verbose >= 2:
        log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)


def canonicalImport(filename):
    preProcess = exampleIO.preProcess

    data_d = {}
    with open(filename) as f:
        reader = csv.DictReader(f)
        for i, row in enumerate(reader):
            clean_row = {k: preProcess(v) for (k, v) in viewitems(row)}
            data_d[filename + str(i)] = clean_row

    return data_d, reader.fieldnames


def evaluateDuplicates(found_dupes, true_dupes):
    true_positives = found_dupes.intersection(true_dupes)
    false_positives = found_dupes.difference(true_dupes)
    uncovered_dupes = true_dupes.difference(found_dupes)

    print('found duplicate')
    print(len(found_dupes))

    print('precision')
    print(1 - len(false_positives) / float(len(found_dupes)))

    print('recall')
    print(len(true_positives) / float(len(true_dupes)))


settings_file = 'canonical_data_matching_learned_settings'

data_1, header = canonicalImport('tests/datasets/restaurant-1.csv')
data_2, _ = canonicalImport('tests/datasets/restaurant-2.csv')

# create a random set of training pairs based on known duplicates
training_pairs = dedupe.trainingDataLink(data_1, data_2, 'unique_id', 5000)

all_data = data_1.copy()
all_data.update(data_2)

duplicates_s = set()
for _, pair in itertools.groupby(sorted(all_data.items(),
                                        key=lambda x: x[1]['unique_id']),
                                 key=lambda x: x[1]['unique_id']):
    pair = list(pair)
    if len(pair) == 2:
        a, b = pair
        duplicates_s.add(frozenset((a[0], b[0])))

t0 = time.time()

print('number of known duplicate pairs', len(duplicates_s))

if os.path.exists(settings_file):
    with open(settings_file, 'rb') as f:
        deduper = dedupe.StaticRecordLink(f)
else:
    fields = [{'field': 'name', 'type': 'String'},
              {'field': 'address', 'type': 'String'},
              {'field': 'cuisine', 'type': 'String'},
              {'field': 'city', 'type': 'String'}
              ]

    deduper = dedupe.RecordLink(fields)
    deduper.sample(data_1, data_2, 10000)
    deduper.markPairs(training_pairs)
    deduper.train()

    with open(settings_file, 'wb') as f:
        deduper.writeSettings(f, index=True)

alpha = deduper.threshold(data_1, data_2)

# print candidates
print('clustering...')
clustered_dupes = deduper.match(data_1, data_2, threshold=alpha)

print('Evaluate Clustering')
confirm_dupes = set(frozenset(pair) for pair, score in clustered_dupes)

evaluateDuplicates(confirm_dupes, duplicates_s)

print('ran in ', time.time() - t0, 'seconds')
{ "content_hash": "d97ed372375b2e501152cc0b03b1c6c7", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 75, "avg_line_length": 28.182608695652174, "alnum_prop": 0.6417772292502314, "repo_name": "tfmorris/dedupe", "id": "55a6f98d51a8a7a522ae6fb87797ad5abae08f4d", "size": "3283", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/canonical_matching.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "198229" }, { "name": "Shell", "bytes": "850" } ], "symlink_target": "" }
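evaluateDuplicates in the script above reduces matching quality to two set operations. A tiny self-contained illustration of the same precision/recall arithmetic; the record ids are invented for the example.

# Toy illustration of the precision/recall computation used in evaluateDuplicates.
found = {frozenset(('a1', 'b1')), frozenset(('a2', 'b9'))}   # pairs the matcher returned
true = {frozenset(('a1', 'b1')), frozenset(('a3', 'b3'))}    # known duplicate pairs

true_positives = found & true           # correctly found pairs
false_positives = found - true          # found, but not real duplicates
uncovered = true - found                # real duplicates that were missed

precision = 1 - len(false_positives) / float(len(found))     # 0.5
recall = len(true_positives) / float(len(true))              # 0.5
print(precision, recall)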
from __future__ import absolute_import from django import template register = template.Library() @register.filter(name='abs') def absolute_value(value): try: return abs(int(value)) except (ValueError, TypeError): return ''
{ "content_hash": "5985a76f447e839d4bdece464cb0016f", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 38, "avg_line_length": 18, "alnum_prop": 0.6746031746031746, "repo_name": "littleweaver/django-argus", "id": "321635ac4435517c46e8023625cf576619516b0b", "size": "252", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "argus/templatetags/argus.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "178980" }, { "name": "JavaScript", "bytes": "85374" }, { "name": "Python", "bytes": "56575" }, { "name": "Ruby", "bytes": "420" } ], "symlink_target": "" }
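The abs filter above is a thin wrapper around int() and abs() with an empty-string fallback. A quick standalone check of that behaviour; the function is re-declared here only so the example runs without loading Django's template machinery (in a template it would be applied as value|abs).

# Stand-alone check of the behaviour implemented by the |abs filter above.
def absolute_value(value):
    try:
        return abs(int(value))
    except (ValueError, TypeError):
        return ''

assert absolute_value(-5) == 5
assert absolute_value('-12') == 12      # numeric strings are coerced first
assert absolute_value('n/a') == ''      # non-numeric input degrades to an empty string
assert absolute_value(None) == ''       # as does None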
import os from functools import lru_cache from django.core.checks import Warning, register from willow.image import Image @lru_cache() def has_jpeg_support(): wagtail_jpg = os.path.join(os.path.dirname(__file__), 'check_files', 'wagtail.jpg') succeeded = True with open(wagtail_jpg, 'rb') as f: try: Image.open(f) except (IOError, Image.LoaderError): succeeded = False return succeeded @lru_cache() def has_png_support(): wagtail_png = os.path.join(os.path.dirname(__file__), 'check_files', 'wagtail.png') succeeded = True with open(wagtail_png, 'rb') as f: try: Image.open(f) except (IOError, Image.LoaderError): succeeded = False return succeeded @register('files') def image_library_check(app_configs, **kwargs): errors = [] if not has_jpeg_support(): errors.append( Warning( 'JPEG image support is not available', hint="Check that the 'libjpeg' library is installed, then reinstall Pillow." ) ) if not has_png_support(): errors.append( Warning( 'PNG image support is not available', hint="Check that the 'zlib' library is installed, then reinstall Pillow." ) ) return errors
{ "content_hash": "9a6e61cf490925317a2f043e42a06165", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 92, "avg_line_length": 23.964912280701753, "alnum_prop": 0.5834553440702782, "repo_name": "torchbox/wagtail", "id": "9f8311d0094410dbe502c8daca5b0df252ce7e14", "size": "1366", "binary": false, "copies": "3", "ref": "refs/heads/stable/2.15.x", "path": "wagtail/images/checks.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "178240" }, { "name": "HTML", "bytes": "307456" }, { "name": "JavaScript", "bytes": "123792" }, { "name": "Makefile", "bytes": "685" }, { "name": "Python", "bytes": "2786743" }, { "name": "Shell", "bytes": "7997" } ], "symlink_target": "" }
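Both checks above follow the same probe-once-and-cache pattern: try to decode a known-good sample file, remember the verdict with lru_cache, and emit a Warning when the probe fails. A generic sketch of that pattern; _try_decode and its naive magic-number test are stand-ins for Willow's Image.open, not part of the real check.

# Generic probe-once-and-cache sketch, modelled on has_jpeg_support/has_png_support.
from functools import lru_cache


def _try_decode(path):
    # Stand-in for a real decode attempt (here: a naive JPEG magic-number check).
    with open(path, 'rb') as f:
        return f.read(2) == b'\xff\xd8'


@lru_cache()
def has_format_support(sample_path):
    # Runs at most once per process; later calls reuse the cached verdict.
    try:
        return _try_decode(sample_path)
    except OSError:
        return False

Because the result is memoized, the sample file is read only once even if the check is evaluated for every run of the system checks.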
"""The tests for the TTS component.""" from http import HTTPStatus from unittest.mock import PropertyMock, patch import pytest import voluptuous as vol from homeassistant.components import media_source, tts from homeassistant.components.demo.tts import DemoProvider from homeassistant.components.media_player import ( ATTR_MEDIA_ANNOUNCE, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, DOMAIN as DOMAIN_MP, SERVICE_PLAY_MEDIA, MediaType, ) from homeassistant.config import async_process_ha_core_config from homeassistant.exceptions import HomeAssistantError from homeassistant.setup import async_setup_component from homeassistant.util.network import normalize_url from tests.common import assert_setup_component, async_mock_service ORIG_WRITE_TAGS = tts.SpeechManager.write_tags async def get_media_source_url(hass, media_content_id): """Get the media source url.""" if media_source.DOMAIN not in hass.config.components: assert await async_setup_component(hass, media_source.DOMAIN, {}) resolved = await media_source.async_resolve_media(hass, media_content_id, None) return resolved.url @pytest.fixture def demo_provider(): """Demo TTS provider.""" return DemoProvider("en") @pytest.fixture(autouse=True) async def internal_url_mock(hass): """Mock internal URL of the instance.""" await async_process_ha_core_config( hass, {"internal_url": "http://example.local:8123"}, ) @pytest.fixture async def setup_tts(hass): """Mock TTS.""" with patch("homeassistant.components.demo.async_setup", return_value=True): assert await async_setup_component( hass, tts.DOMAIN, {"tts": {"platform": "demo"}} ) await hass.async_block_till_done() async def test_setup_component_demo(hass, setup_tts): """Set up the demo platform with defaults.""" assert hass.services.has_service(tts.DOMAIN, "demo_say") assert hass.services.has_service(tts.DOMAIN, "clear_cache") assert f"{tts.DOMAIN}.demo" in hass.config.components async def test_setup_component_demo_no_access_cache_folder(hass, mock_init_cache_dir): """Set up the demo platform with defaults.""" config = {tts.DOMAIN: {"platform": "demo"}} mock_init_cache_dir.side_effect = OSError(2, "No access") assert not await async_setup_component(hass, tts.DOMAIN, config) assert not hass.services.has_service(tts.DOMAIN, "demo_say") assert not hass.services.has_service(tts.DOMAIN, "clear_cache") async def test_setup_component_and_test_service(hass, empty_cache_dir): """Set up the demo platform and call service.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_ANNOUNCE] is True assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_config_language( hass, empty_cache_dir ): """Set up the demo platform and call service.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "language": "de"}} with assert_setup_component(1, 
tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_config_language_special( hass, empty_cache_dir ): """Set up the demo platform and call service with extend language.""" import homeassistant.components.demo.tts as demo_tts demo_tts.SUPPORT_LANGUAGES.append("en_US") calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "language": "en_US"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_wrong_conf_language(hass): """Set up the demo platform and call service with wrong config.""" config = {tts.DOMAIN: {"platform": "demo", "language": "ru"}} with assert_setup_component(0, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) async def test_setup_component_and_test_service_with_service_language( hass, empty_cache_dir ): """Set up the demo platform and call service.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_LANGUAGE: "de", }, blocking=True, ) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3" ).is_file() async def test_setup_component_test_service_with_wrong_service_language( hass, empty_cache_dir ): """Set up the demo platform and call service.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) with pytest.raises(HomeAssistantError): await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_LANGUAGE: "lang", }, blocking=True, ) assert len(calls) == 0 assert not ( 
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_lang_-_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_service_options( hass, empty_cache_dir ): """Set up the demo platform and call service with options.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_LANGUAGE: "de", tts.ATTR_OPTIONS: {"voice": "alex", "age": 5}, }, blocking=True, ) opt_hash = tts._hash_options({"voice": "alex", "age": 5}) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == f"/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3" ).is_file() async def test_setup_component_and_test_with_service_options_def(hass, empty_cache_dir): """Set up the demo platform and call service with default options.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN), patch( "homeassistant.components.demo.tts.DemoProvider.default_options", new_callable=PropertyMock(return_value={"voice": "alex"}), ): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_LANGUAGE: "de", }, blocking=True, ) opt_hash = tts._hash_options({"voice": "alex"}) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == f"/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3" ) await hass.async_block_till_done() assert ( empty_cache_dir / f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_service_options_wrong( hass, empty_cache_dir ): """Set up the demo platform and call service with wrong options.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) with pytest.raises(HomeAssistantError): await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_LANGUAGE: "de", tts.ATTR_OPTIONS: {"speed": 1}, }, blocking=True, ) opt_hash = tts._hash_options({"speed": 1}) assert len(calls) == 0 await hass.async_block_till_done() assert not ( empty_cache_dir / f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_base_url_set(hass): """Set up the demo platform with ``base_url`` set and call service.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "base_url": "http://fnord"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await 
hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "http://fnord" "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491" "_en_-_demo.mp3" ) async def test_setup_component_and_test_service_clear_cache(hass, empty_cache_dir): """Set up the demo platform and call service clear cache.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) # To make sure the file is persisted assert len(calls) == 1 await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) await hass.async_block_till_done() assert ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ).is_file() await hass.services.async_call( tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {}, blocking=True ) assert not ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ).is_file() async def test_setup_component_and_test_service_with_receive_voice( hass, demo_provider, hass_client ): """Set up the demo platform and call service and receive voice.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) message = "There is someone at the door." 
await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: message, }, blocking=True, ) assert len(calls) == 1 url = await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) client = await hass_client() req = await client.get(url) _, demo_data = demo_provider.get_tts_audio("bla", "en") demo_data = tts.SpeechManager.write_tags( "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3", demo_data, demo_provider, message, "en", None, ) assert req.status == HTTPStatus.OK assert await req.read() == demo_data extension, data = await tts.async_get_media_source_audio( hass, calls[0].data[ATTR_MEDIA_CONTENT_ID] ) assert extension == "mp3" assert demo_data == data async def test_setup_component_and_test_service_with_receive_voice_german( hass, demo_provider, hass_client ): """Set up the demo platform and call service and receive voice.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "language": "de"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 url = await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) client = await hass_client() req = await client.get(url) _, demo_data = demo_provider.get_tts_audio("bla", "de") demo_data = tts.SpeechManager.write_tags( "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3", demo_data, demo_provider, "There is someone at the door.", "de", None, ) assert req.status == HTTPStatus.OK assert await req.read() == demo_data async def test_setup_component_and_web_view_wrong_file(hass, hass_client): """Set up the demo platform and receive wrong file from web.""" config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) client = await hass_client() url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" req = await client.get(url) assert req.status == HTTPStatus.NOT_FOUND async def test_setup_component_and_web_view_wrong_filename(hass, hass_client): """Set up the demo platform and receive wrong filename from web.""" config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) client = await hass_client() url = "/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd_en_-_demo.mp3" req = await client.get(url) assert req.status == HTTPStatus.NOT_FOUND async def test_setup_component_test_without_cache(hass, empty_cache_dir): """Set up demo platform without cache.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "cache": False}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 await hass.async_block_till_done() assert not ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ).is_file() async def test_setup_component_test_with_cache_call_service_without_cache( hass, empty_cache_dir ): """Set up demo platform with cache and call service without cache.""" calls = 
async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = {tts.DOMAIN: {"platform": "demo", "cache": True}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", tts.ATTR_CACHE: False, }, blocking=True, ) assert len(calls) == 1 await hass.async_block_till_done() assert not ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ).is_file() async def test_setup_component_test_with_cache_dir( hass, empty_cache_dir, demo_provider ): """Set up demo platform with cache and call service without cache.""" calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) _, demo_data = demo_provider.get_tts_audio("bla", "en") cache_file = ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ) with open(cache_file, "wb") as voice_file: voice_file.write(demo_data) config = {tts.DOMAIN: {"platform": "demo", "cache": True}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) with patch( "homeassistant.components.demo.tts.DemoProvider.get_tts_audio", return_value=(None, None), ): await hass.services.async_call( tts.DOMAIN, "demo_say", { "entity_id": "media_player.something", tts.ATTR_MESSAGE: "There is someone at the door.", }, blocking=True, ) assert len(calls) == 1 assert ( await get_media_source_url(hass, calls[0].data[ATTR_MEDIA_CONTENT_ID]) == "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ) async def test_setup_component_test_with_error_on_get_tts(hass): """Set up demo platform with wrong get_tts_audio.""" config = {tts.DOMAIN: {"platform": "demo"}} with assert_setup_component(1, tts.DOMAIN), patch( "homeassistant.components.demo.tts.DemoProvider.get_tts_audio", return_value=(None, None), ): assert await async_setup_component(hass, tts.DOMAIN, config) async def test_setup_component_load_cache_retrieve_without_mem_cache( hass, demo_provider, empty_cache_dir, hass_client ): """Set up component and load cache and get without mem cache.""" _, demo_data = demo_provider.get_tts_audio("bla", "en") cache_file = ( empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" ) with open(cache_file, "wb") as voice_file: voice_file.write(demo_data) config = {tts.DOMAIN: {"platform": "demo", "cache": True}} with assert_setup_component(1, tts.DOMAIN): assert await async_setup_component(hass, tts.DOMAIN, config) client = await hass_client() url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3" req = await client.get(url) assert req.status == HTTPStatus.OK assert await req.read() == demo_data async def test_setup_component_and_web_get_url(hass, hass_client): """Set up the demo platform and receive file from web.""" config = {tts.DOMAIN: {"platform": "demo"}} await async_setup_component(hass, tts.DOMAIN, config) client = await hass_client() url = "/api/tts_get_url" data = {"platform": "demo", "message": "There is someone at the door."} req = await client.post(url, json=data) assert req.status == HTTPStatus.OK response = await req.json() assert response == { "url": "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3", "path": "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3", } async def test_setup_component_and_web_get_url_bad_config(hass, hass_client): """Set up the demo platform and 
receive wrong file from web.""" config = {tts.DOMAIN: {"platform": "demo"}} await async_setup_component(hass, tts.DOMAIN, config) client = await hass_client() url = "/api/tts_get_url" data = {"message": "There is someone at the door."} req = await client.post(url, json=data) assert req.status == HTTPStatus.BAD_REQUEST async def test_tags_with_wave(hass, demo_provider): """Set up the demo platform and call service and receive voice.""" # below data represents an empty wav file demo_data = bytes.fromhex( "52 49 46 46 24 00 00 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 02 00" + "22 56 00 00 88 58 01 00 04 00 10 00 64 61 74 61 00 00 00 00" ) tagged_data = ORIG_WRITE_TAGS( "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.wav", demo_data, demo_provider, "AI person is in front of your door.", "en", None, ) assert tagged_data != demo_data @pytest.mark.parametrize( "value", ( "http://example.local:8123", "http://example.local", "http://example.local:80", "https://example.com", "https://example.com:443", "https://example.com:8123", ), ) def test_valid_base_url(value): """Test we validate base urls.""" assert tts.valid_base_url(value) == normalize_url(value) # Test we strip trailing `/` assert tts.valid_base_url(value + "/") == normalize_url(value) @pytest.mark.parametrize( "value", ( "http://example.local:8123/sub-path", "http://example.local/sub-path", "https://example.com/sub-path", "https://example.com:8123/sub-path", "mailto:some@email", "http:example.com", "http:/example.com", "http//example.com", "example.com", ), ) def test_invalid_base_url(value): """Test we catch bad base urls.""" with pytest.raises(vol.Invalid): tts.valid_base_url(value) @pytest.mark.parametrize( "engine,language,options,cache,result_engine,result_query", ( (None, None, None, None, "demo", ""), (None, "de", None, None, "demo", "language=de"), (None, "de", {"voice": "henk"}, None, "demo", "language=de&voice=henk"), (None, "de", None, True, "demo", "cache=true&language=de"), ), ) async def test_generate_media_source_id( hass, setup_tts, engine, language, options, cache, result_engine, result_query ): """Test generating a media source ID.""" media_source_id = tts.generate_media_source_id( hass, "msg", engine, language, options, cache ) assert media_source_id.startswith("media-source://tts/") _, _, engine_query = media_source_id.rpartition("/") engine, _, query = engine_query.partition("?") assert engine == result_engine assert query.startswith("message=msg") assert query[12:] == result_query @pytest.mark.parametrize( "engine,language,options", ( ("not-loaded-engine", None, None), (None, "unsupported-language", None), (None, None, {"option": "not-supported"}), ), ) async def test_generate_media_source_id_invalid_options( hass, setup_tts, engine, language, options ): """Test generating a media source ID.""" with pytest.raises(HomeAssistantError): tts.generate_media_source_id(hass, "msg", engine, language, options, None)
{ "content_hash": "69a68345904a81206cfe5934485b58da", "timestamp": "", "source": "github", "line_count": 792, "max_line_length": 112, "avg_line_length": 33.08838383838384, "alnum_prop": 0.6359612302526139, "repo_name": "mezz64/home-assistant", "id": "f521cbda58d516c1b2aa67e3a768045262b0da3c", "size": "26206", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "tests/components/tts/test_init.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52481895" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
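The tests above repeatedly assert cache-file names of the form <message-key>_<language>_<options-hash or "-">_<engine>.mp3 (for example 42f18378..._en_-_demo.mp3, or ..._de_<opt_hash>_demo.mp3 when options are passed). The sketch below only illustrates that naming convention; the hashing choices are assumptions, and the real SpeechManager may derive the message key and options hash differently.

# Illustrative composition of a TTS cache filename as exercised by the tests above.
import hashlib


def cache_filename(message, language, engine, options=None):
    msg_key = hashlib.sha1(message.encode('utf-8')).hexdigest()   # assumed stable digest of the message
    if options:
        opt_key = hashlib.sha1(repr(sorted(options.items())).encode('utf-8')).hexdigest()[:8]
    else:
        opt_key = '-'                                             # "-" when no options are set
    lang = language.lower().replace('_', '-')                     # en_US -> en-us, as in the tests
    return '{}_{}_{}_{}.mp3'.format(msg_key, lang, opt_key, engine)


print(cache_filename('There is someone at the door.', 'en', 'demo'))
print(cache_filename('There is someone at the door.', 'de', 'demo', {'voice': 'alex'}))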
from __future__ import annotations from django.test import TestCase from hc.lib.string import replace class StringTestCase(TestCase): def test_it_works(self): result = replace("$A is $B", {"$A": "aaa", "$B": "bbb"}) self.assertEqual(result, "aaa is bbb") def test_it_ignores_placeholders_in_values(self): result = replace("$A is $B", {"$A": "$B", "$B": "$A"}) self.assertEqual(result, "$B is $A") def test_it_ignores_overlapping_placeholders(self): result = replace("$$AB", {"$A": "", "$B": "text"}) self.assertEqual(result, "$B") def test_it_preserves_non_placeholder_dollar_signs(self): result = replace("$3.50", {"$A": "text"}) self.assertEqual(result, "$3.50")
{ "content_hash": "b9615f2e83dabd76c9aaec364467d3a1", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 64, "avg_line_length": 32.69565217391305, "alnum_prop": 0.5930851063829787, "repo_name": "healthchecks/healthchecks", "id": "a20280144d339aa9512678fb601a14d9056d76a2", "size": "752", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hc/lib/tests/test_string.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "65959" }, { "name": "Dockerfile", "bytes": "1088" }, { "name": "HTML", "bytes": "716643" }, { "name": "JavaScript", "bytes": "50869" }, { "name": "Less", "bytes": "211300" }, { "name": "Python", "bytes": "1043149" }, { "name": "Shell", "bytes": "1655" } ], "symlink_target": "" }
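The four tests above pin down single-pass, left-to-right substitution: placeholders inside substituted values are never re-expanded, partially overlapping placeholders are resolved as the scan reaches them, and bare dollar signs survive. One possible implementation consistent with those tests (not necessarily how hc.lib.string.replace is actually written):

# A single-pass substitution that satisfies the four tests above.
def replace(template, mapping):
    out, i = [], 0
    while i < len(template):
        for placeholder, value in mapping.items():
            if template.startswith(placeholder, i):
                out.append(value)          # substituted text is never re-scanned
                i += len(placeholder)
                break
        else:
            out.append(template[i])        # ordinary character, including a bare '$'
            i += 1
    return ''.join(out)


assert replace("$A is $B", {"$A": "aaa", "$B": "bbb"}) == "aaa is bbb"
assert replace("$A is $B", {"$A": "$B", "$B": "$A"}) == "$B is $A"
assert replace("$$AB", {"$A": "", "$B": "text"}) == "$B"
assert replace("$3.50", {"$A": "text"}) == "$3.50"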
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = ''' author: NetApp Ansible Team (@carchi8py) <[email protected]> description: - Create/Delete cluster peer relations on ONTAP extends_documentation_fragment: - netapp.na_ontap module: na_ontap_cluster_peer options: state: choices: ['present', 'absent'] description: - Whether the specified cluster peer should exist or not. default: present source_intercluster_lifs: description: - List of intercluster addresses of the source cluster. - Used as peer-addresses in destination cluster. - All these intercluster lifs should belong to the source cluster. version_added: "2.8" aliases: - source_intercluster_lif dest_intercluster_lifs: description: - List of intercluster addresses of the destination cluster. - Used as peer-addresses in source cluster. - All these intercluster lifs should belong to the destination cluster. version_added: "2.8" aliases: - dest_intercluster_lif passphrase: description: - The arbitrary passphrase that matches the one given to the peer cluster. source_cluster_name: description: - The name of the source cluster name in the peer relation to be deleted. dest_cluster_name: description: - The name of the destination cluster name in the peer relation to be deleted. - Required for delete dest_hostname: description: - Destination cluster IP or hostname which needs to be peered - Required to complete the peering process at destination cluster. required: True dest_username: description: - Destination username. - Optional if this is same as source username. dest_password: description: - Destination password. - Optional if this is same as source password. 
short_description: NetApp ONTAP Manage Cluster peering version_added: "2.7" ''' EXAMPLES = """ - name: Create cluster peer na_ontap_cluster_peer: state: present source_intercluster_lifs: 1.2.3.4,1.2.3.5 dest_intercluster_lifs: 1.2.3.6,1.2.3.7 passphrase: XXXX hostname: "{{ netapp_hostname }}" username: "{{ netapp_username }}" password: "{{ netapp_password }}" dest_hostname: "{{ dest_netapp_hostname }}" - name: Delete cluster peer na_ontap_cluster_peer: state: absent source_cluster_name: test-source-cluster dest_cluster_name: test-dest-cluster hostname: "{{ netapp_hostname }}" username: "{{ netapp_username }}" password: "{{ netapp_password }}" dest_hostname: "{{ dest_netapp_hostname }}" """ RETURN = """ """ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils from ansible.module_utils.netapp_module import NetAppModule HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() class NetAppONTAPClusterPeer(object): """ Class with cluster peer methods """ def __init__(self): self.argument_spec = netapp_utils.na_ontap_host_argument_spec() self.argument_spec.update(dict( state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), source_intercluster_lifs=dict(required=False, type='list', aliases=['source_intercluster_lif']), dest_intercluster_lifs=dict(required=False, type='list', aliases=['dest_intercluster_lif']), passphrase=dict(required=False, type='str', no_log=True), dest_hostname=dict(required=True, type='str'), dest_username=dict(required=False, type='str'), dest_password=dict(required=False, type='str', no_log=True), source_cluster_name=dict(required=False, type='str'), dest_cluster_name=dict(required=False, type='str') )) self.module = AnsibleModule( argument_spec=self.argument_spec, required_together=[['source_intercluster_lifs', 'dest_intercluster_lifs']], required_if=[('state', 'absent', ['source_cluster_name', 'dest_cluster_name'])], supports_check_mode=True ) self.na_helper = NetAppModule() self.parameters = self.na_helper.set_parameters(self.module.params) if HAS_NETAPP_LIB is False: self.module.fail_json(msg="the python NetApp-Lib module is required") else: self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) # set destination server connection self.module.params['hostname'] = self.parameters['dest_hostname'] if self.parameters.get('dest_username'): self.module.params['username'] = self.parameters['dest_username'] if self.parameters.get('dest_password'): self.module.params['password'] = self.parameters['dest_password'] self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module) # reset to source host connection for asup logs self.module.params['hostname'] = self.parameters['hostname'] def cluster_peer_get_iter(self, cluster): """ Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters :param cluster: type of cluster (source or destination) :return: NaElement object for cluster-get-iter with query """ cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter') query = netapp_utils.zapi.NaElement('query') cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info') if cluster == 'source': peer_lifs, peer_cluster = 'dest_intercluster_lifs', 'dest_cluster_name' else: peer_lifs, peer_cluster = 'source_intercluster_lifs', 'source_cluster_name' if self.parameters.get(peer_lifs): peer_addresses = netapp_utils.zapi.NaElement('peer-addresses') for peer in 
self.parameters.get(peer_lifs): peer_addresses.add_new_child('remote-inet-address', peer) cluster_peer_info.add_child_elem(peer_addresses) if self.parameters.get(peer_cluster): cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster]) query.add_child_elem(cluster_peer_info) cluster_peer_get.add_child_elem(query) return cluster_peer_get def cluster_peer_get(self, cluster): """ Get current cluster peer info :param cluster: type of cluster (source or destination) :return: Dictionary of current cluster peer details if query successful, else return None """ cluster_peer_get_iter = self.cluster_peer_get_iter(cluster) result, cluster_info = None, dict() if cluster == 'source': server = self.server else: server = self.dest_server try: result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error fetching cluster peer %s: %s' % (self.parameters['dest_cluster_name'], to_native(error)), exception=traceback.format_exc()) # return cluster peer details if result.get_child_by_name('num-records') and \ int(result.get_child_content('num-records')) >= 1: cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info') cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name') peers = cluster_peer_info.get_child_by_name('peer-addresses') cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()] return cluster_info return None def cluster_peer_delete(self, cluster): """ Delete a cluster peer on source or destination For source cluster, peer cluster-name = destination cluster name and vice-versa :param cluster: type of cluster (source or destination) :return: """ if cluster == 'source': server, peer_cluster_name = self.server, self.parameters['dest_cluster_name'] else: server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name'] cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children( 'cluster-peer-delete', **{'cluster-name': peer_cluster_name}) try: server.invoke_successfully(cluster_peer_delete, enable_tunneling=True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error deleting cluster peer %s: %s' % (peer_cluster_name, to_native(error)), exception=traceback.format_exc()) def cluster_peer_create(self, cluster): """ Create a cluster peer on source or destination For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa :param cluster: type of cluster (source or destination) :return: None """ cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create') if self.parameters.get('passphrase') is not None: cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase']) peer_addresses = netapp_utils.zapi.NaElement('peer-addresses') if cluster == 'source': server, peer_address = self.server, self.parameters['dest_intercluster_lifs'] else: server, peer_address = self.dest_server, self.parameters['source_intercluster_lifs'] for each in peer_address: peer_addresses.add_new_child('remote-inet-address', each) cluster_peer_create.add_child_elem(peer_addresses) try: server.invoke_successfully(cluster_peer_create, enable_tunneling=True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error creating cluster peer %s: %s' % (peer_address, to_native(error)), exception=traceback.format_exc()) def apply(self): """ Apply action to cluster peer :return: 
None """ self.asup_log_for_cserver("na_ontap_cluster_peer") source = self.cluster_peer_get('source') destination = self.cluster_peer_get('destination') source_action = self.na_helper.get_cd_action(source, self.parameters) destination_action = self.na_helper.get_cd_action(destination, self.parameters) self.na_helper.changed = False # create only if expected cluster peer relation is not present on both source and destination clusters if source_action == 'create' and destination_action == 'create': self.cluster_peer_create('source') self.cluster_peer_create('destination') self.na_helper.changed = True # delete peer relation in cluster where relation is present else: if source_action == 'delete': self.cluster_peer_delete('source') self.na_helper.changed = True if destination_action == 'delete': self.cluster_peer_delete('destination') self.na_helper.changed = True self.module.exit_json(changed=self.na_helper.changed) def asup_log_for_cserver(self, event_name): """ Fetch admin vserver for the given cluster Create and Autosupport log event with the given module name :param event_name: Name of the event log :return: None """ results = netapp_utils.get_cserver(self.server) cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) netapp_utils.ems_log_event(event_name, cserver) def main(): """ Execute action :return: None """ community_obj = NetAppONTAPClusterPeer() community_obj.apply() if __name__ == '__main__': main()
{ "content_hash": "14daa77a38ad76bb142112a5a3cca5a0", "timestamp": "", "source": "github", "line_count": 291, "max_line_length": 118, "avg_line_length": 42.97594501718213, "alnum_prop": 0.6349752118982889, "repo_name": "SergeyCherepanov/ansible", "id": "4bf15982084dba59c079cc2696b2f2f11235cab9", "size": "12647", "binary": false, "copies": "36", "ref": "refs/heads/master", "path": "ansible/ansible/modules/storage/netapp/na_ontap_cluster_peer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Shell", "bytes": "824" } ], "symlink_target": "" }
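The apply() method above only creates the peer relation when it is absent on both clusters, and deletes it on whichever side still reports it. A ZAPI-free sketch of that decision table; 'source' and 'destination' stand for the result of cluster_peer_get() on each cluster (None meaning no existing relation), and the sketch is illustrative rather than a drop-in replacement for the module logic.

# Pure-Python sketch of the create/delete planning done in apply().
def plan(source, destination, state):
    actions = []
    if state == 'present' and source is None and destination is None:
        # create only when the relation is missing on BOTH clusters
        actions = [('create', 'source'), ('create', 'destination')]
    elif state == 'absent':
        # delete wherever the relation is still present
        if source is not None:
            actions.append(('delete', 'source'))
        if destination is not None:
            actions.append(('delete', 'destination'))
    return actions


print(plan(None, None, 'present'))                   # create on both clusters
print(plan({'cluster_name': 'c1'}, None, 'absent'))  # delete only on the source side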
from __future__ import print_function import os import shutil import sys class WrongExtension(Exception): pass def splitext(filename): # not using os.path.splitext as it would return .gz instead of .tar.gz for ext in ".tar.gz", ".exe": if filename.endswith(ext): return filename[:-len(ext)], ext raise WrongExtension( "Unknown agent format for {0}. " "Must be either tar.gz or exe".format(filename)) def normalize_agent_name(filename): return filename.rpartition("_")[0].lower() def normalize_names(directory, target_dir): previous_targets = set() for fn in os.listdir(directory): try: fn, extension = splitext(fn) except WrongExtension: # Ignore files with extensions we don't like continue source = os.path.join(directory, fn + extension) target = os.path.join(target_dir, normalize_agent_name(fn) + extension) print('copying {} to {}'.format(source, target)) if target in previous_targets: raise RuntimeError( 'packages normalised to same target path!', target) previous_targets.add(target) shutil.copy(source, target) normalize_names(*sys.argv[1:3])
{ "content_hash": "b224b7c2a426581034caa2de4c201496", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 79, "avg_line_length": 29.209302325581394, "alnum_prop": 0.6337579617834395, "repo_name": "cloudify-cosmo/cloudify-manager", "id": "317e69110ca93c3e270dca6e354bf7668a14e320", "size": "1256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packaging/agents/copy_packages.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Clojure", "bytes": "4067" }, { "name": "Dockerfile", "bytes": "3843" }, { "name": "HTML", "bytes": "320" }, { "name": "Mako", "bytes": "494" }, { "name": "PLpgSQL", "bytes": "119062" }, { "name": "Python", "bytes": "3825971" }, { "name": "Shell", "bytes": "49121" } ], "symlink_target": "" }
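A quick illustration of the renaming rules in copy_packages.py: strip one of the two accepted extensions, drop everything after the last underscore, and lowercase the rest. The helpers are re-declared here (with a plain ValueError instead of WrongExtension) so the example runs on its own, and the file name is invented.

# Self-contained demonstration of the agent-package renaming rules above.
def splitext(filename):
    for ext in ('.tar.gz', '.exe'):            # the same two formats accepted above
        if filename.endswith(ext):
            return filename[:-len(ext)], ext
    raise ValueError('unsupported agent format: {0}'.format(filename))


def normalize_agent_name(filename):
    return filename.rpartition('_')[0].lower()


name, ext = splitext('Ubuntu-Trusty-Agent_4.2.0.tar.gz')
print(normalize_agent_name(name) + ext)        # ubuntu-trusty-agent.tar.gz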
from __future__ import print_function import os from catkin_tools.argument_parsing import add_cmake_and_make_and_catkin_make_args from catkin_tools.argument_parsing import add_context_args from catkin_tools.context import Context def prepare_arguments(parser): parser.description = "This verb is used to configure a catkin workspace's\ configuration and layout. Calling `catkin config` with no arguments will\ display the current config and affect no changes if a config already exists\ for the current workspace and profile." # Workspace / profile args add_context_args(parser) context_group = parser.add_argument_group('Workspace Context', 'Options affecting the context of the workspace.') add = context_group.add_argument add('--init', action='store_true', default=False, help='Initialize a workspace if it does not yet exist.') add = context_group.add_mutually_exclusive_group().add_argument add('--extend', '-e', dest='extend_path', type=str, help='Explicitly extend the result-space of another catkin workspace, ' 'overriding the value of $CMAKE_PREFIX_PATH.') add('--no-extend', dest='extend_path', action='store_const', const='', help='Un-set the explicit extension of another workspace as set by --extend.') add = context_group.add_argument add('--mkdirs', action='store_true', default=False, help='Create directories required by the configuration (e.g. source space) if they do not already exist.') spaces_group = parser.add_argument_group('Spaces', 'Location of parts of the catkin workspace.') add = spaces_group.add_mutually_exclusive_group().add_argument add('-s', '--source-space', default=None, help='The path to the source space.') add('--default-source-space', action='store_const', dest='source_space', default=None, const=Context.DEFAULT_SOURCE_SPACE, help='Use the default path to the source space ("src")') add = spaces_group.add_mutually_exclusive_group().add_argument add('-b', '--build-space', default=None, help='The path to the build space.') add('--default-build-space', action='store_const', dest='build_space', default=None, const=Context.DEFAULT_BUILD_SPACE, help='Use the default path to the build space ("build")') add = spaces_group.add_mutually_exclusive_group().add_argument add('-d', '--devel-space', default=None, help='Sets the target devel space') add('--default-devel-space', action='store_const', dest='devel_space', default=None, const=Context.DEFAULT_DEVEL_SPACE, help='Sets the default target devel space ("devel")') add = spaces_group.add_mutually_exclusive_group().add_argument add('-i', '--install-space', default=None, help='Sets the target install space') add('--default-install-space', action='store_const', dest='install_space', default=None, const=Context.DEFAULT_INSTALL_SPACE, help='Sets the default target install space ("install")') add = spaces_group.add_argument add('-x', '--space-suffix', help='Suffix for build, devel, and install space if they are not otherwise explicitly set.') devel_group = parser.add_argument_group( 'Devel Space', 'Options for configuring the structure of the devel space.') add = devel_group.add_mutually_exclusive_group().add_argument add('--isolate-devel', action='store_true', default=None, help='Build products from each catkin package into isolated devel spaces.') add('--merge-devel', dest='isolate_devel', action='store_false', default=None, help='Build products from each catkin package into a single merged devel spaces.') install_group = parser.add_argument_group( 'Install Space', 'Options for configuring the structure of the install space.') add = 
install_group.add_mutually_exclusive_group().add_argument add('--install', action='store_true', default=None, help='Causes each package to be installed to the install space.') add('--no-install', dest='install', action='store_false', default=None, help='Disables installing each package into the install space.') add = install_group.add_mutually_exclusive_group().add_argument add('--isolate-install', action='store_true', default=None, help='Install each catkin package into a separate install space.') add('--merge-install', dest='isolate_install', action='store_false', default=None, help='Install each catkin package into a single merged install space.') build_group = parser.add_argument_group('Build Options', 'Options for configuring the way packages are built.') add_cmake_and_make_and_catkin_make_args(build_group) return parser def main(opts): try: # Try to find a metadata directory to get context defaults # Otherwise use the specified directory context = Context.Load(opts.workspace, opts.profile, opts) if context.initialized() or opts.init: Context.Save(context) if opts.mkdirs and not context.source_space_exists(): os.makedirs(context.source_space_abs) print(context.summary()) except IOError as exc: # Usually happens if workspace is already underneath another catkin_tools workspace print('error: could not configure catkin workspace: %s' % exc.message) return 1 return 0
{ "content_hash": "fd36267d218e09026d19b594a1cb2bea", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 117, "avg_line_length": 49.08108108108108, "alnum_prop": 0.6897944199706314, "repo_name": "xqms/catkin_tools", "id": "dd9bd26b39fbd6b9fdc260fb4d0b40c49d3d907f", "size": "6050", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "catkin_tools/verbs/catkin_config/cli.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Objective-C", "bytes": "3354" }, { "name": "Python", "bytes": "227706" }, { "name": "Shell", "bytes": "6713" } ], "symlink_target": "" }
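prepare_arguments() above leans heavily on mutually exclusive option pairs that share a single dest with a default of None, so each setting ends up tri-state: True when the positive flag is passed, False for the negative flag, and None (meaning "leave the existing config value alone") when neither is given. A minimal standalone sketch of that pattern:

# Stand-alone sketch of the --flag / --no-flag pattern used in prepare_arguments().
import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--install', dest='install', action='store_true', default=None)
group.add_argument('--no-install', dest='install', action='store_false', default=None)

print(parser.parse_args([]).install)                # None  -> keep the stored config value
print(parser.parse_args(['--install']).install)     # True
print(parser.parse_args(['--no-install']).install)  # False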
import mock from ec2api.tests.unit import base from ec2api.tests.unit import fakes from ec2api.tests.unit import matchers from ec2api.tests.unit import tools class SnapshotTestCase(base.ApiTestCase): def test_describe_snapshots(self): self.cinder.volume_snapshots.list.return_value = [ fakes.OSSnapshot(fakes.OS_SNAPSHOT_1), fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)] self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2, fakes.DB_VOLUME_2) resp = self.execute('DescribeSnapshots', {}) self.assertThat(resp, matchers.DictMatches( {'snapshotSet': [fakes.EC2_SNAPSHOT_1, fakes.EC2_SNAPSHOT_2]}, orderless_lists=True)) self.db_api.get_items.assert_any_call(mock.ANY, 'vol') self.db_api.get_items_by_ids = tools.CopyingMock( return_value=[fakes.DB_SNAPSHOT_1]) resp = self.execute('DescribeSnapshots', {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1}) self.assertThat(resp, matchers.DictMatches( {'snapshotSet': [fakes.EC2_SNAPSHOT_1]}, orderless_lists=True)) self.db_api.get_items_by_ids.assert_called_once_with( mock.ANY, set([fakes.ID_EC2_SNAPSHOT_1])) self.check_filtering( 'DescribeSnapshots', 'snapshotSet', [ # TODO(ft): declare a constant for the description in fakes ('description', 'fake description'), ('owner-id', fakes.ID_OS_PROJECT), ('progress', '100%'), ('snapshot-id', fakes.ID_EC2_SNAPSHOT_1), ('start-time', fakes.TIME_CREATE_SNAPSHOT_2), ('status', 'completed'), ('volume-id', fakes.ID_EC2_VOLUME_2), # TODO(ft): declare a constant for the volume size in fakes ('volume-size', 1) ]) self.check_tag_support( 'DescribeSnapshots', 'snapshotSet', fakes.ID_EC2_SNAPSHOT_1, 'snapshotId') def test_describe_snapshots_auto_remove(self): self.cinder.volume_snapshots.list.return_value = [] self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2) resp = self.execute('DescribeSnapshots', {}) self.assertThat(resp, matchers.DictMatches( {'snapshotSet': []}, orderless_lists=True)) self.db_api.get_items.assert_any_call(mock.ANY, 'vol') self.db_api.get_items.assert_any_call(mock.ANY, 'snap') self.db_api.delete_item.assert_any_call(mock.ANY, fakes.ID_EC2_SNAPSHOT_1) def test_describe_snapshots_invalid_parameters(self): self.cinder.volume_snapshots.list.return_value = [ fakes.OSSnapshot(fakes.OS_SNAPSHOT_1), fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)] self.assert_execution_error( 'InvalidSnapshot.NotFound', 'DescribeSnapshots', {'SnapshotId.1': fakes.random_ec2_id('snap')}) self.cinder.volume_snapshots.list.side_effect = lambda: [] self.assert_execution_error( 'InvalidSnapshot.NotFound', 'DescribeSnapshots', {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1}) def test_create_snapshot_from_volume(self): self.cinder.volume_snapshots.create.return_value = ( fakes.OSSnapshot(fakes.OS_SNAPSHOT_1)) self.db_api.add_item.side_effect = ( tools.get_db_api_add_item(fakes.ID_EC2_SNAPSHOT_1)) self.set_mock_db_items(fakes.DB_VOLUME_2) self.cinder.volumes.get.side_effect = ( lambda vol_id: ( fakes.OSVolume(fakes.OS_VOLUME_2) if vol_id == fakes.ID_OS_VOLUME_2 else None)) resp = self.execute( 'CreateSnapshot', {'VolumeId': fakes.ID_EC2_VOLUME_2}) self.assertThat(fakes.EC2_SNAPSHOT_1, matchers.DictMatches(resp)) self.db_api.add_item.assert_called_once_with( mock.ANY, 'snap', tools.purge_dict(fakes.DB_SNAPSHOT_1, ('id',))) self.cinder.volume_snapshots.create.assert_called_once_with( fakes.ID_OS_VOLUME_2, force=True) def test_format_snapshot_maps_status(self): fake_snapshot = fakes.OSSnapshot(fakes.OS_SNAPSHOT_1) self.cinder.volume_snapshots.list.return_value = [fake_snapshot] self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2) 
fake_snapshot.status = 'new' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('pending', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'creating' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('pending', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'available' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('completed', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'active' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('completed', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'deleting' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('pending', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'error' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('error', resp['snapshotSet'][0]['status']) fake_snapshot.status = 'banana' resp = self.execute('DescribeSnapshots', {}) self.assertEqual('banana', resp['snapshotSet'][0]['status'])
{ "content_hash": "088c2e97ae7e97fb72a4b479dddd3388", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 74, "avg_line_length": 40.84057971014493, "alnum_prop": 0.6022001419446416, "repo_name": "vishnu-kumar/ec2-api", "id": "1f6b590d5cf66f19299b827e6ee371421c19bf9c", "size": "6225", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "ec2api/tests/unit/test_snapshot.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1702647" }, { "name": "Shell", "bytes": "29444" } ], "symlink_target": "" }
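test_format_snapshot_maps_status above pins down the expected OpenStack-to-EC2 status translation, including the pass-through for unknown values. The lookup-table sketch below is only an illustration consistent with those expectations; the real mapping lives in ec2api's snapshot code, not in the tests.

# Status translation implied by test_format_snapshot_maps_status.
_STATUS_MAP = {
    'new': 'pending',
    'creating': 'pending',
    'available': 'completed',
    'active': 'completed',
    'deleting': 'pending',
    'error': 'error',
}


def ec2_snapshot_status(os_status):
    # Unknown OpenStack states fall through unchanged, as the test expects.
    return _STATUS_MAP.get(os_status, os_status)


assert ec2_snapshot_status('creating') == 'pending'
assert ec2_snapshot_status('available') == 'completed'
assert ec2_snapshot_status('banana') == 'banana'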
""" Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: [email protected] Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTRevisionApproverInfo(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "date": (datetime,), # noqa: E501 "id": (str,), # noqa: E501 "name": (str,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "date": "date", # noqa: E501 "id": "id", # noqa: E501 "name": "name", # noqa: E501 } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_revision_approver_info.BTRevisionApproverInfo - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
date (datetime): [optional] # noqa: E501 id (str): [optional] # noqa: E501 name (str): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration for var_name, var_value in six.iteritems(kwargs): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
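
# A minimal construction sketch for the model above; the id, name and date values
# are arbitrary illustrative examples, not taken from any real Onshape document.
def _example_revision_approver_info():
    return BTRevisionApproverInfo(
        id="000000000000000000000000",  # example approver id (assumed format)
        name="Jane Doe",                # example approver display name
        date=datetime(2020, 1, 1),      # example approval timestamp
    )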
{ "content_hash": "06b0c20d798650f534ee2d626aa0fe4f", "timestamp": "", "source": "github", "line_count": 147, "max_line_length": 88, "avg_line_length": 32.816326530612244, "alnum_prop": 0.5677860696517413, "repo_name": "onshape-public/onshape-clients", "id": "e1afa5739f4a836e8a2c4d2b0e1eab5f309bcc9c", "size": "4841", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/onshape_client/oas/models/bt_revision_approver_info.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "4873" }, { "name": "Go", "bytes": "59674" }, { "name": "HTML", "bytes": "3851790" }, { "name": "JavaScript", "bytes": "2217" }, { "name": "Makefile", "bytes": "559" }, { "name": "Python", "bytes": "7560009" }, { "name": "Shell", "bytes": "3475" }, { "name": "TypeScript", "bytes": "1412661" } ], "symlink_target": "" }
''' Wrapper for ark-tweet-nlp ''' import subprocess import shlex import os from django.conf import settings import logging class POSTagger(object): def __init__(self, callback): self._log = logging.getLogger("POSTagger") self.tagger_command = os.path.expanduser(settings.POS_TAGGER['command']) self.queue = [] self.queue_max_size = settings.POS_TAGGER['max_queue'] self._log.info("Command: %s" % self.tagger_command) self._log.info("Max queue size: %s" % self.queue_max_size) self._cb = callback def enqueue(self, post=None, text=None, forceProcessing=False): if post is not None and text is not None: self.queue.append([post, text]) if forceProcessing or len(self.queue) >= self.queue_max_size: tmpqueue = self.queue self.queue = [] self.processTexts(tmpqueue) pass def processTexts(self, texts): self._log.info("Processing %s texts" % len(texts)) raw = self.getRawTexts(texts) self.runCommand(texts, raw) def runCommand(self, texts, raw): proc_args = shlex.split(self.tagger_command) proc = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p_stdout, p_stderr = proc.communicate(raw) if p_stderr is not None: self._log.info(p_stderr.strip().split('\n')[1]) self.parseResult(texts, p_stdout) def parseResult(self, texts, taggerResult): all_results = [] cur_result = [] for line in taggerResult.split('\n'): if line == '': if len(cur_result) > 0: all_results.append(cur_result) cur_result = [] continue token, tag, confidence = line.split('\t') confidence = float(confidence) cur_result.append([token, tag, confidence]) self._log.info("Received tag-information for %s documents. Expected: %s" % (len(all_results), len(texts))) if (len(all_results) != len(texts)): self._log.warn("Assertion failed.") return #exit() for idx in range(len(all_results)): texts[idx].append(all_results[idx]) self.reportBack(texts) def reportBack(self, texts): for post, text, tags in texts: self._cb(post, text, tags) def getRawTexts(self, texts): raw_texts = [] for post, text in texts: raw_text = ' '.join(text).replace('\r', ' ').replace('\n', ' ').strip() if raw_text == "": raw_text = "EMPTY_TEXT" raw_texts.append(raw_text) return "\n".join(raw_texts)
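
# A minimal usage sketch (not exercised by the module itself). It assumes Django
# settings provide POS_TAGGER = {'command': ..., 'max_queue': ...} as read in
# __init__, and that the ark-tweet-nlp command is runnable on this machine.
# The post dict and token list below are arbitrary example values.
def _log_tags(post, tokens, tags):
    # Callback signature matches reportBack(): one (token, tag, confidence) triple per token.
    log = logging.getLogger("POSTagger.example")
    for token, tag, confidence in tags:
        log.info("%s/%s (%.2f)", token, tag, confidence)

# Example wiring (commented out so importing this module stays side-effect free):
# tagger = POSTagger(_log_tags)
# tagger.enqueue(post={'id': 1}, text=['good', 'morning', 'twitter'], forceProcessing=True)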
{ "content_hash": "910f4c4e32c84746951b75d7216e6fd6", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 114, "avg_line_length": 33.59756097560975, "alnum_prop": 0.5720508166969147, "repo_name": "FrankGrimm/text-insights", "id": "dded78cff0185c0c96b65ae8a6bee92d3354d7b2", "size": "2779", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/ti/management/commands/postagger.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "103299" }, { "name": "JavaScript", "bytes": "1470677" }, { "name": "PHP", "bytes": "36056" }, { "name": "Python", "bytes": "130473" }, { "name": "Shell", "bytes": "1368" } ], "symlink_target": "" }
from __future__ import unicode_literals from __future__ import absolute_import import datetime import json import logging import os import random import re import sys import time import Queue import threading import shelve import uuid import urllib2 from geopy.geocoders import GoogleV3 from pgoapi import PGoApi from pgoapi.utilities import f2i, get_cell_ids from s2sphere import Cell, CellId, LatLng from . import cell_workers from .base_task import BaseTask from .plugin_loader import PluginLoader from .api_wrapper import ApiWrapper from .cell_workers.utils import distance from .event_manager import EventManager from .human_behaviour import sleep from .item_list import Item from .metrics import Metrics from .sleep_schedule import SleepSchedule from pokemongo_bot.event_handlers import SocketIoHandler, LoggingHandler, SocialHandler, CaptchaHandler from pokemongo_bot.socketio_server.runner import SocketIoRunner from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl from pokemongo_bot.base_dir import _base_dir from .worker_result import WorkerResult from .tree_config_builder import ConfigException from .tree_config_builder import MismatchTaskApiVersion from .tree_config_builder import TreeConfigBuilder from .inventory import init_inventory, player from sys import platform as _platform from pgoapi.protos.pogoprotos.enums import badge_type_pb2 from pgoapi.exceptions import AuthException, NotLoggedInException, ServerSideRequestThrottlingException, ServerBusyOrOfflineException, NoPlayerPositionSetException, HashingOfflineException from pgoapi.hash_server import HashServer class FileIOException(Exception): pass class PokemonGoBot(object): @property def position(self): return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt @property def noised_position(self): return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt #@position.setter # these should be called through api now that gps replication is there... #def position(self, position_tuple): # self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple @property def player_data(self): """ Returns the player data as received from the API. :return: The player data. :rtype: dict """ return self._player @property def inbox(self): """ Returns the inbox data as received from the API. :return: The inbox data. 
:rtype: dict """ return self._inbox @property def stardust(self): dust = filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0] if 'amount' in dust: return dust['amount'] else: return 0 @stardust.setter def stardust(self, value): dust = filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0] if 'amount' in dust: dust['amount'] = value def __init__(self, db, config): self.database = db self.config = config super(PokemonGoBot, self).__init__() self.fort_timeouts = dict() self.pokemon_list = json.load( open(os.path.join(_base_dir, 'data', 'pokemon.json')) ) self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json'))) # @var Metrics self.metrics = Metrics(self) self.latest_inventory = None self.cell = None self.recent_forts = [None] * config.forts_max_circle_size self.tick_count = 0 self.softban = False self.wake_location = None self.start_position = None self.last_map_object = None self.last_time_map_object = 0 self.logger = logging.getLogger(type(self).__name__) self.alt = self.config.gps_default_altitude # Make our own copy of the workers for this instance self.workers = [] # Theading setup for file writing self.web_update_queue = Queue.Queue(maxsize=1) self.web_update_thread = threading.Thread(target=self.update_web_location_worker) self.web_update_thread.start() # Heartbeat limiting self.heartbeat_threshold = self.config.heartbeat_threshold self.heartbeat_counter = 0 self.last_heartbeat = time.time() self.hb_locked = False # lock hb on snip # Inventory refresh limiting self.inventory_refresh_threshold = 10 self.inventory_refresh_counter = 0 self.last_inventory_refresh = time.time() # Allow user to change hash service if self.config.hashendpoint: HashServer.endpoint = self.config.hashendpoint # Catch on/off self.catch_disabled = False self.capture_locked = False # lock catching while moving to VIP pokemon # Inform bot if there's a response self.empty_response = False client_id_file_path = os.path.join(_base_dir, 'data', 'mqtt_client_id') saved_info = shelve.open(client_id_file_path) key = 'client_id'.encode('utf-8') if key in saved_info: self.config.client_id = saved_info[key] else: self.config.client_id = str(uuid.uuid4()) saved_info[key] = self.config.client_id saved_info.close() def start(self, bot): self._setup_event_system(bot) self.sleep_schedule = SleepSchedule(self, self.config.sleep_schedule) if self.config.sleep_schedule else None if self.sleep_schedule: self.sleep_schedule.work() self._setup_api() self._load_recent_forts() init_inventory(self) self.display_player_info() self._print_character_info() if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info: self._print_list_pokemon() random.seed() def _setup_event_system(self, bot): handlers = [] color = self.config.logging and 'color' in self.config.logging and self.config.logging['color'] debug = self.config.debug handlers.append(LoggingHandler(color, debug)) handlers.append(SocialHandler(self)) handlers.append(CaptchaHandler(self, self.config.solve_captcha)) if self.config.websocket_server_url: if self.config.websocket_start_embedded_server: self.sio_runner = SocketIoRunner(self.config.websocket_server_url) self.sio_runner.start_listening_async() websocket_handler = SocketIoHandler( self, self.config.websocket_server_url ) handlers.append(websocket_handler) if self.config.websocket_remote_control: remote_control = WebsocketRemoteControl(self).start() # @var EventManager self.event_manager = EventManager(bot, self.config.walker_limit_output, 
*handlers) self._register_events() if self.config.show_events: self.event_manager.event_report() sys.exit(1) # Registering event: # self.event_manager.register_event("location", parameters=['lat', 'lng']) # # Emitting event should be enough to add logging and send websocket # message: : # self.event_manager.emit('location', 'level'='info', data={'lat': 1, 'lng':1}), def _register_events(self): self.event_manager.register_event( 'location_found', parameters=('position', 'location') ) self.event_manager.register_event('api_error') self.event_manager.register_event('config_error') self.event_manager.register_event('captcha') self.event_manager.register_event('login_started') self.event_manager.register_event('login_failed') self.event_manager.register_event('login_successful') self.event_manager.register_event('niantic_warning') self.event_manager.register_event('set_start_location') self.event_manager.register_event('load_cached_location') self.event_manager.register_event('location_cache_ignored') self.event_manager.register_event('debug') self.event_manager.register_event('refuse_to_sit') self.event_manager.register_event('reset_destination') self.event_manager.register_event('new_destination') self.event_manager.register_event('moving_to_destination') self.event_manager.register_event('arrived_at_destination') self.event_manager.register_event('staying_at_destination') self.event_manager.register_event('buddy_pokemon', parameters=('pokemon', 'iv', 'cp')) self.event_manager.register_event('buddy_reward', parameters=('pokemon', 'family', 'candy_earned', 'candy')) self.event_manager.register_event('buddy_walked', parameters=('pokemon', 'distance_walked', 'distance_needed')) # ignore candy above threshold self.event_manager.register_event( 'ignore_candy_above_thresold', parameters=( 'name', 'amount', 'threshold' ) ) self.event_manager.register_event('followpath_output_disabled') self.event_manager.register_event( 'position_update', parameters=( 'current_position', 'last_position', 'distance', # optional 'distance_unit' # optional ) ) self.event_manager.register_event( 'path_lap_update', parameters=( 'number_lap', 'number_lap_max' ) ) self.event_manager.register_event( 'path_lap_end', parameters=( 'duration', 'resume' ) ) self.event_manager.register_event('location_cache_error') self.event_manager.register_event('security_check') self.event_manager.register_event('bot_start') self.event_manager.register_event('bot_exit') self.event_manager.register_event('bot_interrupted') # sleep stuff self.event_manager.register_event( 'next_sleep', parameters=( 'time', 'duration' ) ) self.event_manager.register_event( 'bot_sleep', parameters=( 'time_hms', 'wake' ) ) # random pause self.event_manager.register_event( 'next_random_pause', parameters=( 'time', 'duration' ) ) self.event_manager.register_event( 'bot_random_pause', parameters=( 'time_hms', 'resume' ) ) # recycle stuff self.event_manager.register_event( 'next_force_recycle', parameters=( 'time' ) ) self.event_manager.register_event('force_recycle') # random alive pause self.event_manager.register_event( 'next_random_alive_pause', parameters=( 'time', 'duration' ) ) self.event_manager.register_event( 'bot_random_alive_pause', parameters=( 'time_hms', 'resume' ) ) # fort stuff self.event_manager.register_event( 'spun_fort', parameters=( 'fort_id', 'latitude', 'longitude' ) ) self.event_manager.register_event( 'lured_pokemon_found', parameters=( 'fort_id', 'fort_name', 'encounter_id', 'latitude', 'longitude' ) ) self.event_manager.register_event( 
'moving_to_hunter_target', parameters=( 'target_name', 'distance' ) ) self.event_manager.register_event( 'moving_to_fort', parameters=( 'fort_name', 'target_type', 'distance' ) ) self.event_manager.register_event( 'moving_to_lured_fort', parameters=( 'fort_name', 'target_type', 'distance', 'lure_distance' ) ) self.event_manager.register_event( 'spun_pokestop', parameters=( 'pokestop', 'exp', 'items', 'stop_kind', 'spin_amount_now' ) ) self.event_manager.register_event( 'pokestop_empty', parameters=('pokestop',) ) self.event_manager.register_event( 'pokestop_out_of_range', parameters=('pokestop',) ) self.event_manager.register_event( 'pokestop_on_cooldown', parameters=('pokestop', 'minutes_left') ) self.event_manager.register_event( 'unknown_spin_result', parameters=('status_code',) ) self.event_manager.register_event('pokestop_searching_too_often') self.event_manager.register_event('arrived_at_fort') # pokemon stuff self.event_manager.register_event( 'catchable_pokemon', parameters=( 'pokemon_id', 'spawn_point_id', 'encounter_id', 'latitude', 'longitude', 'expiration_timestamp_ms', 'pokemon_name' ) ) self.event_manager.register_event( 'incensed_pokemon_found', parameters=( 'pokemon_id', 'encounter_id', 'encounter_location', 'latitude', 'longitude' ) ) self.event_manager.register_event( 'pokemon_appeared', parameters=( 'pokemon', 'ncp', 'cp', 'iv', 'iv_display', 'encounter_id', 'latitude', 'longitude', 'pokemon_id', 'shiny' ) ) self.event_manager.register_event('no_pokeballs') self.event_manager.register_event('enough_ultraballs') self.event_manager.register_event('lure_success') self.event_manager.register_event('lure_failed') self.event_manager.register_event('lure_not_enough') self.event_manager.register_event('lure_info') self.event_manager.register_event( 'pokemon_catch_rate', parameters=( 'catch_rate', 'ball_name', 'berry_name', 'berry_count' ) ) self.event_manager.register_event( 'threw_berry', parameters=( 'berry_name', 'ball_name', 'new_catch_rate' ) ) self.event_manager.register_event( 'threw_pokeball', parameters=( 'throw_type', 'spin_label', 'ball_name', 'success_percentage', 'count_left' ) ) self.event_manager.register_event( 'pokemon_capture_failed', parameters=('pokemon',) ) self.event_manager.register_event( 'pokemon_vanished', parameters=( 'pokemon', 'encounter_id', 'latitude', 'longitude', 'pokemon_id' ) ) self.event_manager.register_event( 'vanish_limit_reached', parameters=( 'duration', 'resume' ) ) self.event_manager.register_event('pokemon_not_in_range') self.event_manager.register_event('pokemon_inventory_full') self.event_manager.register_event( 'pokemon_caught', parameters=( 'pokemon', 'ncp', 'cp', 'iv', 'iv_display', 'exp', 'shiny', 'stardust', 'encounter_id', 'latitude', 'longitude', 'pokemon_id', 'daily_catch_limit', 'caught_last_24_hour', ) ) self.event_manager.register_event( 'pokemon_vip_caught', parameters=( 'pokemon', 'ncp', 'cp', 'iv', 'iv_display', 'exp', 'shiny', 'stardust', 'encounter_id', 'latitude', 'longitude', 'pokemon_id', 'daily_catch_limit', 'caught_last_24_hour', ) ) self.event_manager.register_event( 'pokemon_evolved', parameters=('pokemon', 'new', 'iv', 'old_cp', 'cp', 'candy', 'xp') ) self.event_manager.register_event( 'pokemon_favored', parameters=('pokemon', 'iv', 'cp') ) self.event_manager.register_event( 'pokemon_unfavored', parameters=('pokemon', 'iv', 'cp') ) self.event_manager.register_event( 'pokemon_evolve_check', parameters=('has', 'needs') ) self.event_manager.register_event( 'pokemon_upgraded', parameters=('pokemon', 'iv', 'cp', 
'new_cp', 'candy', 'stardust') ) self.event_manager.register_event('skip_evolve') self.event_manager.register_event('threw_berry_failed', parameters=('status_code',)) self.event_manager.register_event('vip_pokemon') self.event_manager.register_event('gained_candy', parameters=('gained_candy', 'quantity', 'type')) self.event_manager.register_event('catch_limit') self.event_manager.register_event('spin_limit') self.event_manager.register_event('show_best_pokemon', parameters=('pokemons')) self.event_manager.register_event('revived_pokemon') self.event_manager.register_event('healing_pokemon') # level up stuff self.event_manager.register_event( 'level_up', parameters=( 'previous_level', 'current_level' ) ) self.event_manager.register_event( 'level_up_reward', parameters=('items',) ) # lucky egg self.event_manager.register_event( 'used_lucky_egg', parameters=('amount_left',) ) self.event_manager.register_event('lucky_egg_error') # softban self.event_manager.register_event('softban') self.event_manager.register_event('softban_fix') self.event_manager.register_event('softban_fix_done') # egg incubating self.event_manager.register_event( 'incubate_try', parameters=( 'incubator_id', 'egg_id' ) ) self.event_manager.register_event( 'incubate', parameters=('distance_in_km',) ) self.event_manager.register_event( 'next_egg_incubates', parameters=('eggs_left', 'eggs_inc', 'eggs') ) self.event_manager.register_event('incubator_already_used') self.event_manager.register_event('egg_already_incubating') self.event_manager.register_event( 'egg_hatched', parameters=( 'name', 'cp', 'ncp', 'iv_ads', 'iv_pct', 'exp', 'stardust', 'candy' ) ) self.event_manager.register_event('egg_hatched_fail') # discard item self.event_manager.register_event( 'item_discarded', parameters=( 'amount', 'item', 'maximum' ) ) self.event_manager.register_event( 'item_discard_skipped', parameters=('space',) ) self.event_manager.register_event( 'item_discard_fail', parameters=('item',) ) # inventory self.event_manager.register_event('inventory_full') # release self.event_manager.register_event( 'keep_best_release', parameters=( 'amount', 'pokemon', 'criteria' ) ) self.event_manager.register_event( 'future_pokemon_release', parameters=( 'pokemon', 'cp', 'iv', 'ivcp', 'below_iv', 'below_cp', 'below_ivcp', 'cp_iv_logic' ) ) self.event_manager.register_event( 'pokemon_release', parameters=('pokemon', 'iv', 'cp', 'ivcp', 'candy', 'candy_type') ) self.event_manager.register_event( 'pokemon_keep', parameters=('pokemon', 'iv', 'cp', 'ivcp') ) # polyline walker self.event_manager.register_event( 'polyline_request', parameters=('url',) ) # cluster self.event_manager.register_event( 'found_cluster', parameters=( 'num_points', 'forts', 'radius', 'distance' ) ) self.event_manager.register_event( 'arrived_at_cluster', parameters=( 'num_points', 'forts', 'radius' ) ) # rename self.event_manager.register_event( 'rename_pokemon', parameters=('old_name', 'current_name',) ) self.event_manager.register_event( 'pokemon_nickname_invalid', parameters=('nickname',) ) self.event_manager.register_event( 'unset_pokemon_nickname', parameters=('old_name',) ) # Move To map pokemon self.event_manager.register_event( 'move_to_map_pokemon_fail', parameters=('message',) ) self.event_manager.register_event( 'move_to_map_pokemon_updated_map', parameters=('lat', 'lon') ) self.event_manager.register_event( 'move_to_map_pokemon_teleport_to', parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon', 'disappears_in') ) self.event_manager.register_event( 
'move_to_map_pokemon_encounter', parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon', 'disappears_in') ) self.event_manager.register_event( 'move_to_map_pokemon_move_towards', parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon', 'disappears_in') ) self.event_manager.register_event( 'move_to_map_pokemon_teleport_back', parameters=('last_lat', 'last_lon') ) self.event_manager.register_event( 'moving_to_pokemon_throught_fort', parameters=('fort_name', 'distance','poke_name','poke_dist') ) self.event_manager.register_event( 'move_to_map_pokemon', parameters=('message') ) # cached recent_forts self.event_manager.register_event('loaded_cached_forts') self.event_manager.register_event('cached_fort') self.event_manager.register_event( 'no_cached_forts', parameters=('path', ) ) self.event_manager.register_event( 'error_caching_forts', parameters=('path', ) ) # database shit self.event_manager.register_event('catch_log') self.event_manager.register_event('vanish_log') self.event_manager.register_event('evolve_log') self.event_manager.register_event('login_log') self.event_manager.register_event('transfer_log') self.event_manager.register_event('pokestop_log') self.event_manager.register_event('softban_log') self.event_manager.register_event('eggs_hatched_log') self.event_manager.register_event( 'badges', parameters=('badge', 'level') ) self.event_manager.register_event( 'player_data', parameters=('player_data', ) ) self.event_manager.register_event( 'forts_found', parameters=('json') ) # UseIncense self.event_manager.register_event( 'use_incense', parameters=('type', 'incense_count') ) # BuddyPokemon self.event_manager.register_event( 'buddy_update', parameters=('name') ) self.event_manager.register_event( 'buddy_update_fail', parameters=('name', 'error') ) self.event_manager.register_event( 'buddy_candy_earned', parameters=('candy', 'family', 'quantity', 'candy_earned', 'candy_limit') ) self.event_manager.register_event('buddy_candy_fail') self.event_manager.register_event( 'buddy_next_reward', parameters=('name', 'km_walked', 'km_total') ) self.event_manager.register_event('buddy_keep_active') self.event_manager.register_event( 'buddy_not_available', parameters=('name') ) # Sniper self.event_manager.register_event('sniper_log', parameters=('message', 'message')) self.event_manager.register_event('sniper_error', parameters=('message', 'message')) self.event_manager.register_event('sniper_teleporting', parameters=('latitude', 'longitude', 'name')) # Catch-limiter self.event_manager.register_event('catch_limit_on') self.event_manager.register_event('catch_limit_off') self.event_manager.register_event( 'pokemon_knock_out_gym', parameters=('pokemon', 'gym_name', 'notification_date', 'awarded_coins', 'awarded_coins_today') ) self.event_manager.register_event( 'pokemon_hungy', parameters=('pokemon', 'gym_name', 'notification_date') ) def tick(self): self.health_record.heartbeat() self.cell = self.get_meta_cell() if self.sleep_schedule: self.sleep_schedule.work() now = time.time() * 1000 for fort in self.cell["forts"]: timeout = fort.get("cooldown_complete_timestamp_ms", 0) if timeout >= now: self.fort_timeouts[fort["id"]] = timeout self._refresh_inventory() self.tick_count += 1 # Check if session token has expired self.check_session(self.position) for worker in self.workers: if worker.work() == WorkerResult.RUNNING: return def get_meta_cell(self): location = self.position[0:2] cells = self.find_close_cells(*location) # Combine all cells into a single dict of the items we care about. 
forts = [] wild_pokemons = [] catchable_pokemons = [] nearby_pokemons = [] for cell in cells: if "forts" in cell and len(cell["forts"]): forts += cell["forts"] if "wild_pokemons" in cell and len(cell["wild_pokemons"]): wild_pokemons += cell["wild_pokemons"] if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]): catchable_pokemons += cell["catchable_pokemons"] if "nearby_pokemons" in cell and len(cell["nearby_pokemons"]): latlng = LatLng.from_point(Cell(CellId(cell["s2_cell_id"])).get_center()) for p in cell["nearby_pokemons"]: p["latitude"] = latlng.lat().degrees p["longitude"] = latlng.lng().degrees p["s2_cell_id"] = cell["s2_cell_id"] nearby_pokemons += cell["nearby_pokemons"] # If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved if len(forts) > 1 or not self.cell: return { "forts": forts, "wild_pokemons": wild_pokemons, "catchable_pokemons": catchable_pokemons, "nearby_pokemons": nearby_pokemons } # If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells. else: return { "forts": self.cell["forts"], "wild_pokemons": wild_pokemons, "catchable_pokemons": catchable_pokemons, "nearby_pokemons": nearby_pokemons } def update_web_location(self, cells=[], lat=None, lng=None, alt=None): # we can call the function with no arguments and still get the position # and map_cells if lat is None: lat = self.api._position_lat if lng is None: lng = self.api._position_lng if alt is None: alt = self.api._position_alt # dont cache when teleport_to if self.api.teleporting: return if cells == []: location = self.position[0:2] cells = self.find_close_cells(*location) user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username) try: with open(user_data_cells, 'w') as outfile: json.dump(cells, outfile) except IOError as e: self.logger.info('[x] Error while opening location file: %s' % e) user_web_location = os.path.join( _base_dir, 'web', 'location-%s.json' % self.config.username ) # alt is unused atm but makes using *location easier try: with open(user_web_location, 'w') as outfile: json.dump({ 'lat': lat, 'lng': lng, 'alt': alt, 'cells': cells }, outfile) except IOError as e: self.logger.info('[x] Error while opening location file: %s' % e) user_data_lastlocation = os.path.join( _base_dir, 'data', 'last-location-%s.json' % self.config.username ) try: with open(user_data_lastlocation, 'w') as outfile: json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile) except IOError as e: self.logger.info('[x] Error while opening location file: %s' % e) def emit_forts_event(self,response_dict): map_objects = response_dict.get( 'responses', {} ).get('GET_MAP_OBJECTS', {}) status = map_objects.get('status', None) map_cells = [] if status and status == 1: map_cells = map_objects['map_cells'] if map_cells and len(map_cells): for cell in map_cells: if "forts" in cell and len(cell["forts"]): self.event_manager.emit( 'forts_found', sender=self, level='debug', formatted='Found forts {json}', data={'json': json.dumps(cell["forts"])} ) def find_close_cells(self, lat, lng): cellid = get_cell_ids(lat, lng) timestamp = [0, ] * len(cellid) response_dict = self.get_map_objects(lat, lng, timestamp, cellid) map_objects = response_dict.get( 'responses', {} ).get('GET_MAP_OBJECTS', {}) status = map_objects.get('status', None) map_cells = [] if status and status == 1: map_cells = map_objects['map_cells'] position = (lat, lng, 0) 
map_cells.sort( key=lambda x: distance( lat, lng, x['forts'][0]['latitude'], x['forts'][0]['longitude']) if x.get('forts', []) else 1e6 ) return map_cells def check_session(self, position): # Check session expiry if self.api._auth_provider and self.api._auth_provider._ticket_expire: # prevent crash if return not numeric value if not str(self.api._auth_provider._ticket_expire).isdigit(): self.logger.info("Ticket expired value is not numeric", 'yellow') remaining_time = \ self.api._auth_provider._ticket_expire / 1000 - time.time() if remaining_time < 60: self.event_manager.emit( 'api_error', sender=self, level='info', formatted='Session stale, re-logging in.' ) self.api = ApiWrapper(config=self.config) self.api.set_position(*position) self.login() def login(self): status = {} retry = 0 quit_login = False self.event_manager.emit( 'login_started', sender=self, level='info', formatted="Login procedure started." ) lat, lng = self.position[0:2] self.api.set_position(lat, lng, self.alt) # or should the alt kept to zero? def yes_no( question ): # raw_input returns the empty string for "enter" yes = set(['yes','y', 'ye', '']) no = set(['no','n']) print question choice = raw_input().lower() if choice in yes: return True elif choice in no: return False else: print "Please respond with 'yes' or 'no'" return None while not quit_login: try: self.api.login( self.config.auth_service, str(self.config.username), str(self.config.password)) # No exception, set quit_login = true quit_login = True except AuthException as e: self.event_manager.emit( 'login_failed', sender=self, level='info', formatted='Login process failed: {}'.format(e) ) # Exception encountered. Retry 3 times, everytime increase wait time 5 secs retry += 1 sleeptime = retry*5 self.event_manager.emit( 'login_failed', sender=self, level='info', formatted="Retry {} time(s) for {} secs".format(retry,sleeptime) ) sleep(retry*5) # Quit after 3rd tries if retry == 3: sys.exit() with self.database as conn: c = conn.cursor() c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'") result = c.fetchone() if result[0] == 1: conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS')) else: self.event_manager.emit( 'login_failed', sender=self, level='info', formatted="Login table not founded, skipping log" ) self.event_manager.emit( 'login_successful', sender=self, level='info', formatted="Login successful." ) # Start of security, to get various API Versions from different sources # Get Official API link = "https://pgorelease.nianticlabs.com/plfe/version" f = urllib2.urlopen(link) myfile = f.read() f.close() officalAPI = myfile[2:8] self.event_manager.emit( 'security_check', sender=self, level='info', formatted="Niantic Official API Version: {}".format(officalAPI) ) PGoAPI_version = PGoApi.get_api_version() PGoAPI_version_str = str(PGoAPI_version) PGoAPI_version_str = "0."+ PGoAPI_version_str[0:2] + "." 
+ PGoAPI_version_str[-1] self.event_manager.emit( 'security_check', sender=self, level='info', formatted="Bot is currently running on API {}".format(PGoAPI_version_str) ) if self.config.check_niantic_api is True: if HashServer.endpoint == "": self.event_manager.emit( 'security_check', sender=self, level='info', formatted="Warning: Bot is running on legacy API" ) else: officialAPI_int = int(officalAPI.replace('.','')) PGoAPI_version_tmp = str(PGoAPI_version) PGoAPI_version_tmp = PGoAPI_version_tmp[0:2] + PGoAPI_version_tmp[-1] PGoAPI_version_int = int(PGoAPI_version_tmp) if PGoAPI_version_int < officialAPI_int: self.event_manager.emit( 'security_check', sender=self, level='info', formatted="We have detected a Pokemon API Change. Latest Niantic Version is: {}. Program Exiting...".format(officalAPI) ) yn=None while yn==None: yn = yes_no("Warning: A new pokemon API version is found. Do you want to keep the bot running on your own risk of loosing your account? Y/N") if not yn: sys.exit(1) else: self.event_manager.emit( 'security_check', sender=self, level='info', formatted="Current PGoAPI is using {} API. Niantic API Check Pass".format(PGoAPI_version_str) ) self.heartbeat() def _setup_api(self): # instantiate pgoapi @var ApiWrapper self.api = ApiWrapper(config=self.config) # provide player position on the earth self._set_starting_position() self.login() # chain subrequests (methods) into one RPC call self.logger.info('') # send empty map_cells and then our position self.update_web_location() def _print_character_info(self): # get player profile call # ---------------------- request = self.api.create_request() request.get_player() response_dict = request.call() # print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2))) currency_1 = "0" currency_2 = "0" warn = False if response_dict: self._player = response_dict['responses']['GET_PLAYER']['player_data'] if 'warn' in response_dict['responses']['GET_PLAYER']: warn = response_dict['responses']['GET_PLAYER']['warn'] player = self._player else: self.logger.info( "The API didn't return player info, servers are unstable - " "retrying.", 'red' ) sleep(5) self._print_character_info() # @@@ TODO: Convert this to d/m/Y H:M:S creation_date = datetime.datetime.fromtimestamp( player['creation_timestamp_ms'] / 1e3) creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S") pokecoins = '0' stardust = '0' items_inventory = inventory.items() if 'amount' in player['currencies'][0]: pokecoins = player['currencies'][0]['amount'] if 'amount' in player['currencies'][1]: stardust = player['currencies'][1]['amount'] self.logger.info('') self.logger.info('--- {username} ---'.format(**player)) self.logger.info( 'Pokemon Bag: {}/{}'.format( inventory.Pokemons.get_space_used(), inventory.get_pokemon_inventory_size() ) ) self.logger.info( 'Items: {}/{}'.format( inventory.Items.get_space_used(), inventory.get_item_inventory_size() ) ) self.logger.info( 'Stardust: {}'.format(stardust) + ' | Pokecoins: {}'.format(pokecoins) ) # Items Output self.logger.info( 'PokeBalls: ' + str(items_inventory.get(1).count) + ' | Great Balls: ' + str(items_inventory.get(2).count) + ' | Ultra Balls: ' + str(items_inventory.get(3).count) + ' | Master Balls: ' + str(items_inventory.get(4).count)) self.logger.info( 'RazzBerries: ' + str(items_inventory.get(701).count) + ' | Nanab Berries: ' + str(items_inventory.get(703).count) + ' | Pinap Berries: ' + str(items_inventory.get(705).count) + ' | Golden RazzBerries: ' + str(items_inventory.get(706).count) + ' | Golden Nanab 
Berries: ' + str(items_inventory.get(707).count) + ' | Golden Pinap Berries: ' + str(items_inventory.get(708).count)) self.logger.info( 'LuckyEgg: ' + str(items_inventory.get(301).count) + ' | Incubator: ' + str(items_inventory.get(902).count)) self.logger.info( 'Potion: ' + str(items_inventory.get(101).count) + ' | Super Potion: ' + str(items_inventory.get(102).count) + ' | Hyper Potion: ' + str(items_inventory.get(103).count) + ' | Max Potion: ' + str(items_inventory.get(104).count)) self.logger.info( 'Incense: ' + str(items_inventory.get(401).count) + ' | Lure Module: ' + str(items_inventory.get(501).count)) self.logger.info( 'Revive: ' + str(items_inventory.get(201).count) + ' | Max Revive: ' + str(items_inventory.get(202).count)) self.logger.info( 'Sun Stone: ' + str(items_inventory.get(1101).count) + ' | Kings Rock: ' + str(items_inventory.get(1102).count) + ' | Metal Coat: ' + str(items_inventory.get(1103).count) + ' | Dragon Scale: ' + str(items_inventory.get(1104).count) + ' | Upgrade: ' + str(items_inventory.get(1105).count)) self.logger.info( 'Fast TM: ' + str(items_inventory.get(1201).count) + ' | Charge TM: ' + str(items_inventory.get(1202).count) + ' | Rare Candy: ' + str(items_inventory.get(1301).count) + ' | Free Raid Pass: ' + str(items_inventory.get(1401).count) + ' | Premium Raid Pass: ' + str(items_inventory.get(1402).count) + ' | Legendary Raid Pass: ' + str(items_inventory.get(1403).count)) if warn: self.logger.info('') self.event_manager.emit( 'niantic_warning', sender=self, level='warning', formatted="This account has recieved a warning from Niantic. Bot at own risk." ) sleep(5) # Pause to allow user to see warning self.logger.info('') def _print_list_pokemon(self): # get pokemon list bag = inventory.pokemons().all() id_list =list(set(map(lambda x: x.pokemon_id, bag))) id_list.sort() pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list] show_count = self.config.pokemon_bag_show_count show_candies = self.config.pokemon_bag_show_candies poke_info_displayed = self.config.pokemon_bag_pokemon_info def get_poke_info(info, pokemon): poke_info = { 'cp': 'CP {}'.format(pokemon.cp), 'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina), 'iv_pct': 'IV {}'.format(pokemon.iv), 'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)), 'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)), 'level': "Level {}".format(pokemon.level), 'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max), 'moveset': 'Moves: {}'.format(pokemon.moveset), 'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2)) } if info not in poke_info: raise ConfigException("info '{}' isn't available for displaying".format(info)) return poke_info[info] self.logger.info('Pokemon:') for pokes in pokemon_list: pokes.sort(key=lambda p: p.cp, reverse=True) line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name) if show_count: line_p += '[{}]'.format(len(pokes)) if show_candies: line_p += '[{} candies]'.format(pokes[0].candy_quantity) line_p += ': ' poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes] self.logger.info(line_p + ' | '.join(poke_info)) self.logger.info('') def use_lucky_egg(self): request = self.api.create_request() request.use_item_xp_boost(item_id=301) return request.call() def _set_starting_position(self): self.event_manager.emit( 'set_start_location', sender=self, level='info', formatted='Setting start location.' 
) has_position = False if self.config.test: # TODO: Add unit tests return if self.wake_location: msg = "Wake up location found: {location} {position}" self.event_manager.emit( 'location_found', sender=self, level='info', formatted=msg, data={ 'location': self.wake_location['raw'], 'position': self.wake_location['coord'] } ) self.api.set_position(*self.wake_location['coord']) self.event_manager.emit( 'position_update', sender=self, level='info', formatted="Now at {current_position}", data={ 'current_position': self.position, 'last_position': '', 'distance': '', 'distance_unit': '' } ) self.start_position = self.position has_position = True return if self.config.location: location_str = self.config.location location = self.get_pos_by_name(location_str.replace(" ", "")) msg = "Location found: {location} {position}" self.event_manager.emit( 'location_found', sender=self, level='info', formatted=msg, data={ 'location': location_str, 'position': location } ) self.api.set_position(*location) self.event_manager.emit( 'position_update', sender=self, level='info', formatted="Now at {current_position}", data={ 'current_position': self.position, 'last_position': '', 'distance': '', 'distance_unit': '' } ) self.start_position = self.position has_position = True if self.config.location_cache: try: # save location flag used to pull the last known location from # the location.json self.event_manager.emit( 'load_cached_location', sender=self, level='debug', formatted='Loading cached location...' ) json_file = os.path.join(_base_dir, 'data', 'last-location-%s.json' % self.config.username) try: with open(json_file, "r") as infile: location_json = json.load(infile) except (IOError, ValueError): # Unable to read json file. # File may be corrupt. Create a new one. location_json = [] except: raise FileIOException("Unexpected error reading from {}".web_inventory) location = ( location_json['lat'], location_json['lng'], location_json['alt'], ) # If location has been set in config, only use cache if starting position has not differed if has_position and 'start_position' in location_json: last_start_position = tuple(location_json.get('start_position', [])) # Start position has to have been set on a previous run to do this check if last_start_position and last_start_position != self.start_position: msg = 'Going to a new place, ignoring cached location.' self.event_manager.emit( 'location_cache_ignored', sender=self, level='debug', formatted=msg ) return self.api.set_position(*location) self.event_manager.emit( 'position_update', sender=self, level='debug', formatted='Loaded location {current_position} from cache', data={ 'current_position': location, 'last_position': '', 'distance': '', 'distance_unit': '' } ) has_position = True except Exception: if has_position is False: sys.exit( "No cached Location. Please specify initial location." ) self.event_manager.emit( 'location_cache_error', sender=self, level='debug', formatted='Parsing cached location failed.' ) def get_pos_by_name(self, location_name): # Check if given location name, belongs to favorite_locations favorite_location_coords = self._get_pos_by_fav_location(location_name) if favorite_location_coords is not None: return favorite_location_coords # Check if the given location is already a coordinate. if ',' in location_name: possible_coordinates = re.findall( "[-]?\d{1,3}(?:[.]\d+)?", location_name ) if len(possible_coordinates) >= 2: # 2 matches, this must be a coordinate. We'll bypass the Google # geocode so we keep the exact location. 
self.logger.info( '[x] Coordinates found in passed in location, ' 'not geocoding.' ) return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt) geolocator = GoogleV3(api_key=self.config.gmapkey) loc = geolocator.geocode(location_name, timeout=10) return float(loc.latitude), float(loc.longitude), float(loc.altitude) def _get_pos_by_fav_location(self, location_name): location_name = location_name.lower() coords = None for location in self.config.favorite_locations: if location.get('name').lower() == location_name: coords = re.findall( "[-]?\d{1,3}[.]\d{3,7}", location.get('coords').strip() ) if len(coords) >= 2: self.logger.info('Favorite location found: {} ({})'.format(location_name, coords)) break #TODO: This is real bad if coords is None: return coords else: return float(coords[0]), float(coords[1]), (float(coords[2]) if len(coords) == 3 else self.alt) def heartbeat(self): # Remove forts that we can now spin again. now = time.time() self.fort_timeouts = {id: timeout for id, timeout in self.fort_timeouts.iteritems() if timeout >= now * 1000} if now - self.last_heartbeat >= self.heartbeat_threshold and not self.hb_locked: previous_heartbeat = self.last_heartbeat self.last_heartbeat = now request = self.api.create_request() request.get_player() request.check_awarded_badges() request.get_inbox() responses = None try: responses = request.call() except NotLoggedInException: self.logger.warning('Unable to login, retying') self.empty_response = True except: self.logger.warning('Error occured in heatbeat, retying') self.empty_response = True if not self.empty_response: if responses['responses']['GET_PLAYER']['success'] == True: # we get the player_data anyway, might as well store it self._player = responses['responses']['GET_PLAYER']['player_data'] self.event_manager.emit( 'player_data', sender=self, level='debug', formatted='player_data: {player_data}', data={'player_data': self._player} ) if responses['responses']['GET_INBOX']['result'] == 1: self._inbox = responses['responses']['GET_INBOX']['inbox'] # self.logger.info("Got inbox messages?") # self.logger.info("Inbox: %s" % responses['responses']['GET_INBOX']) if 'notifications' in self._inbox: for notification in self._inbox['notifications']: notification_date = datetime.datetime.fromtimestamp(int(notification['create_timestamp_ms']) / 1e3) if previous_heartbeat > (int(notification['create_timestamp_ms']) / 1e3): # Skipp old notifications! continue if notification['category'] == 'pokemon_hungry': gym_name = pokemon = 'Unknown' for variable in notification['variables']: if variable['name'] == 'GYM_NAME': gym_name = variable['literal'] if variable['name'] == 'POKEMON_NICKNAME': pokemon = variable['literal'] self.event_manager.emit( 'pokemon_hungy', sender=self, level='info', formatted='{pokemon} in the Gym {gym_name} is hungy and want a candy! 
{notification_date}', data={ 'pokemon': pokemon, 'gym_name': gym_name, 'notification_date': notification_date.strftime('%Y-%m-%d %H:%M:%S.%f') } ) if notification['category'] == 'gym_removal': gym_name = pokemon = 'Unknown' for variable in notification['variables']: if variable['name'] == 'GYM_NAME': gym_name = variable['literal'] if variable['name'] == 'POKEMON_NICKNAME': pokemon = variable['literal'] if variable['name'] == 'POKECOIN_AWARDED': coins_awared = variable['literal'] if variable['name'] == 'POKECOIN_AWARDED_TODAY': coins_awared_today = variable['literal'] self.event_manager.emit( 'pokemon_knock_out_gym', sender=self, level='info', formatted='{pokemon} has been knocked out the Gym {gym_name} at {notification_date}. Awarded coins: {awarded_coins} | Today awared: {awarded_coins_today}', data={ 'pokemon': pokemon, 'gym_name': gym_name, 'notification_date': notification_date.strftime('%Y-%m-%d %H:%M:%S.%f'), 'awarded_coins': coins_awared, 'awarded_coins_today': coins_awared_today } ) if responses['responses']['CHECK_AWARDED_BADGES']['success'] == True: # store awarded_badges reponse to be used in a task or part of heartbeat self._awarded_badges = responses['responses']['CHECK_AWARDED_BADGES'] if 'awarded_badges' in self._awarded_badges: i = 0 for badge in self._awarded_badges['awarded_badges']: badgelevel = self._awarded_badges['awarded_badge_levels'][i] badgename = badge_type_pb2._BADGETYPE.values_by_number[badge].name i += 1 self.event_manager.emit( 'badges', sender=self, level='info', formatted='awarded badge: {badge}, lvl {level}', data={'badge': badgename, 'level': badgelevel} ) human_behaviour.action_delay(3, 10) try: self.web_update_queue.put_nowait(True) # do this outside of thread every tick except Queue.Full: pass threading.Timer(self.heartbeat_threshold, self.heartbeat).start() def update_web_location_worker(self): while True: self.web_update_queue.get() #skip undate if no response if not self.empty_response: self.update_web_location() def display_player_info(self): player_stats = player() if player_stats: nextlvlxp = (int(player_stats.next_level_xp) - int(player_stats.exp)) self.logger.info( 'Level: {}'.format(player_stats.level) + ' (Next Level: {} XP)'.format(nextlvlxp) + ' (Total: {} XP)' ''.format(player_stats.exp)) self.logger.info( 'Pokemon Captured: ' '{}'.format(player_stats.pokemons_captured) + ' | Pokestops Visited: ' '{}'.format(player_stats.poke_stop_visits)) def get_forts(self, order_by_distance=False): forts = [fort for fort in self.cell['forts'] if 'latitude' in fort and 'longitude' in fort] # Need to filter out disabled forts! forts = filter(lambda x: x["enabled"] is True, forts) forts = filter(lambda x: 'closed' not in fort, forts) if order_by_distance: forts.sort(key=lambda x: distance( self.position[0], self.position[1], x['latitude'], x['longitude'] )) return forts if order_by_distance: forts.sort(key=lambda x: distance( self.position[0], self.position[1], x['latitude'], x['longitude'] )) return forts def get_gyms(self, order_by_distance=False): forts = [fort for fort in self.cell['forts'] if 'latitude' in fort and 'type' not in fort] # Need to filter out disabled gyms! 
forts = filter(lambda x: x["enabled"] is True, forts) forts = filter(lambda x: 'closed' not in fort, forts) if order_by_distance: forts.sort(key=lambda x: distance( self.position[0], self.position[1], x['latitude'], x['longitude'] )) return forts def get_map_objects(self, lat, lng, timestamp, cellid): if time.time() - self.last_time_map_object < self.config.map_object_cache_time: return self.last_map_object request = self.api.create_request() request.get_map_objects( latitude=f2i(lat), longitude=f2i(lng), since_timestamp_ms=timestamp, cell_id=cellid ) self.last_map_object = request.call() self.emit_forts_event(self.last_map_object) #if self.last_map_object: # print self.last_map_object self.last_time_map_object = time.time() return self.last_map_object def _load_recent_forts(self): if not self.config.forts_cache_recent_forts: return cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username) try: # load the cached recent forts cached_recent_forts = [] try: with open(cached_forts_path) as f: cached_recent_forts = json.load(f) except (IOError, ValueError) as e: self.logger.info('[x] Error while opening cached forts: %s' % e) except: raise FileIOException("Unexpected error opening {}".cached_forts_path) num_cached_recent_forts = len(cached_recent_forts) num_recent_forts = len(self.recent_forts) # Handles changes in max_circle_size if not num_recent_forts: self.recent_forts = [] elif num_recent_forts > num_cached_recent_forts: self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts elif num_recent_forts < num_cached_recent_forts: self.recent_forts = cached_recent_forts[-num_recent_forts:] else: self.recent_forts = cached_recent_forts self.event_manager.emit( 'loaded_cached_forts', sender=self, level='debug', formatted='Loaded cached forts...' ) except IOError: self.event_manager.emit( 'no_cached_forts', sender=self, level='debug', formatted='Starting new cached forts for {path}', data={'path': cached_forts_path} ) def _refresh_inventory(self): # Perform inventory update every n seconds now = time.time() if now - self.last_inventory_refresh >= self.inventory_refresh_threshold: inventory.refresh_inventory() self.last_inventory_refresh = now self.inventory_refresh_counter += 1
{ "content_hash": "bff9003aa739b35781035beba8eca1dd", "timestamp": "", "source": "github", "line_count": 1778, "max_line_length": 188, "avg_line_length": 36.70809898762655, "alnum_prop": 0.5105796191030689, "repo_name": "goedzo/PokemonGo-Bot", "id": "accc1ffc419dd8b4ffe4848bae32daf73d1df1b6", "size": "65291", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pokemongo_bot/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "26769" }, { "name": "CSS", "bytes": "1519" }, { "name": "HTML", "bytes": "5645" }, { "name": "JavaScript", "bytes": "317991" }, { "name": "Python", "bytes": "863163" }, { "name": "Shell", "bytes": "9090" } ], "symlink_target": "" }
"""Test file to display the error message and verify it with FileCheck.""" # RUN: %p/saved_model_error | FileCheck %s from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from absl import app import tensorflow.compat.v2 as tf if hasattr(tf, 'enable_v2_behavior'): tf.enable_v2_behavior() class TestModule(tf.Module): """The test model has unsupported op.""" @tf.function(input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)]) def model(self, x): y = tf.math.reciprocal(x) # Not supported return y + y class TestGraphDebugInfo(object): """Test stack trace can be displayed.""" def testSavedModelDebugInfo(self): """Save a saved model with unsupported ops, and then load and convert it.""" # saved the model test_model = TestModule() saved_model_path = '/tmp/test.saved_model' save_options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(test_model, saved_model_path, options=save_options) # load the model and convert converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) converter.experimental_new_converter = True converter.convert() # pylint: disable=line-too-long # CHECK-LABEL: testSavedModelDebugInfo # CHECK: error: 'tf.Reciprocal' op is neither a custom op nor a flex op # CHECK: attrs=attr_protos, op_def=op_def) # CHECK: ^ # CHECK: {{.*tensorflow/python/ops/gen_math_ops.py:[0-9]+:[0-9]+: note: called from}} # CHECK: "Reciprocal", x=x, name=name) # CHECK: ^ # CHECK: {{.*tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py:[0-9]+:[0-9]+: note: called from}} # CHECK: y = tf.math.reciprocal(x) # Not supported # CHECK: ^ # CHECK: <unknown>:0: error: failed while converting: 'main' # pylint: enable=line-too-long def main(argv): """test driver method writes the error message to stdout.""" if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') try: TestGraphDebugInfo().testSavedModelDebugInfo() except Exception as e: # pylint: disable=broad-except sys.stdout.write('testSavedModelDebugInfo') sys.stdout.write(str(e)) if __name__ == '__main__': app.run(main)
{ "content_hash": "aed7c7d0549bb9d0c4ae4e3a3a2353c4", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 114, "avg_line_length": 32.22222222222222, "alnum_prop": 0.6719827586206897, "repo_name": "arborh/tensorflow", "id": "322330f1b9b945ecc6db42d49434dace5c5b8fb3", "size": "3009", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "5003" }, { "name": "Batchfile", "bytes": "45988" }, { "name": "C", "bytes": "773694" }, { "name": "C#", "bytes": "8562" }, { "name": "C++", "bytes": "76730781" }, { "name": "CMake", "bytes": "6545" }, { "name": "Dockerfile", "bytes": "81136" }, { "name": "Go", "bytes": "1679107" }, { "name": "HTML", "bytes": "4686483" }, { "name": "Java", "bytes": "952944" }, { "name": "Jupyter Notebook", "bytes": "567243" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "1299305" }, { "name": "Makefile", "bytes": "61397" }, { "name": "Objective-C", "bytes": "104706" }, { "name": "Objective-C++", "bytes": "297753" }, { "name": "PHP", "bytes": "24055" }, { "name": "Pascal", "bytes": "3752" }, { "name": "Pawn", "bytes": "17546" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "38757009" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "7459" }, { "name": "Shell", "bytes": "643787" }, { "name": "Smarty", "bytes": "34727" }, { "name": "Swift", "bytes": "62814" } ], "symlink_target": "" }
"""Migration for a given Submitty course database.""" def up(config, database, semester, course): """ Run up migration. :param config: Object holding configuration details about Submitty :type config: migrator.config.Config :param database: Object for interacting with given database for environment :type database: migrator.db.Database :param semester: Semester of the course being migrated :type semester: str :param course: Code of course being migrated :type course: str """ database.execute("ALTER TABLE queue_settings ADD IF NOT EXISTS token TEXT NOT null DEFAULT 'temp_token'"); database.execute("Update queue_settings SET token = code Where token = 'temp_token';"); def down(config, database, semester, course): """ Run down migration (rollback). :param config: Object holding configuration details about Submitty :type config: migrator.config.Config :param database: Object for interacting with given database for environment :type database: migrator.db.Database :param semester: Semester of the course being migrated :type semester: str :param course: Code of course being migrated :type course: str """ database.execute("ALTER TABLE queue_settings DROP COLUMN IF EXISTS token;");
{ "content_hash": "6885323494c7d1afcf272c2ac686162f", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 110, "avg_line_length": 36.083333333333336, "alnum_prop": 0.7205542725173211, "repo_name": "Submitty/Submitty", "id": "491699f8e4d47893e3e5ee5661274f798a5ac2d2", "size": "1299", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "migration/migrator/migrations/course/20200131192137_office_hours_queue_queue_tokens.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "8450" }, { "name": "C++", "bytes": "496998" }, { "name": "CMake", "bytes": "1561" }, { "name": "CSS", "bytes": "210295" }, { "name": "HTML", "bytes": "799796" }, { "name": "Java", "bytes": "3828" }, { "name": "JavaScript", "bytes": "981630" }, { "name": "PHP", "bytes": "3103857" }, { "name": "PLpgSQL", "bytes": "122825" }, { "name": "Python", "bytes": "1589891" }, { "name": "Shell", "bytes": "205161" }, { "name": "TeX", "bytes": "21960" }, { "name": "Twig", "bytes": "1239136" }, { "name": "TypeScript", "bytes": "17328" } ], "symlink_target": "" }
__version__ = '0.1.3' try: import uwsgi except ImportError: uwsgi = None try: import cPickle as pickle except ImportError: import pickle default_app_config = 'django_uwsgi.apps.DjangoUwsgiConfig'
{ "content_hash": "fca447cff08c557657ba83d8023cb84e", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 58, "avg_line_length": 15.428571428571429, "alnum_prop": 0.6990740740740741, "repo_name": "brente/django-uwsgi", "id": "de8fad83c6e05c70bb92f7046b4f2ce2b7a0c476", "size": "216", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_uwsgi/__init__.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "13617" }, { "name": "JavaScript", "bytes": "196" }, { "name": "Python", "bytes": "30099" } ], "symlink_target": "" }
import logging
from string import split

# NB: 'all' imported here is the MiscLib helper (a predicate applied across
# parallel sequences), not the Python builtin of the same name.
from Functions import all, formatIntList, formatInt

def ipAdrStrToInt( adrStr ):
    """
    Convert a dotted ip address to 32 bit integer.
    """
    adrParts = split( adrStr, ".", 3 )
    return (int(adrParts[0]) << 24) + (int(adrParts[1]) << 16) + (int(adrParts[2]) << 8) + int(adrParts[3])

def addBroadcastBits( iAdr, bitCount ):
    """
    iAdr is 32 bit integer
    bitCount is integer.
    """
    # set the broadcast (host) bits below the netmask
    for idx in range( 32-bitCount ):
        iAdr = iAdr | (1 << idx)
    return iAdr

def getBroadcastAddressI( adrStr, bitStr ):
    """
    Returns broadcast address as integer.
    """
    iAdr = ipAdrStrToInt( adrStr )      # integer address
    bAdr = addBroadcastBits( iAdr, int( bitStr ) )
    return bAdr

def getBroadcastAddress( adrStr ):
    """
    Convert an ip address in form nn.nn.nn.nn/bb into its broadcast address format.
    /bb is optional and assumes caller knows what they are doing.
    """
    netParts = split( adrStr, "/", 1 )
    if ( len(netParts) == 1 ):
        return adrStr
    # else has netmask part.
    bAdr = getBroadcastAddressI( netParts[0], netParts[1] )
    return "%i.%i.%i.%i" % ( ((bAdr>>24)&0xFF), ((bAdr>>16)&0xFF), ((bAdr>>8)&0xFF), (bAdr&0xFF) )

# Helper functions for processing IP addresses as lists of int values
# (I think this representation will be easier to migrate to also support IPv6 - GK)

def parseIpAdrs(ipadrs):
    """
    Parse IP address in dotted decimal form, and return a sequence of 4 numbers
    """
    # Strip off any port and/or netmask bits
    ipadrs = ipadrs.split('/')[0].split(':')[0]
    return map(int, ipadrs.split('.'))

def parseNetAdrs(netadrs):
    """
    Parse network address specification, returning a pair of:
    (a) IP address bytes tuple
    (b) number of '1' bits in netmask
    """
    (ipadrs,maskbits) = netadrs.split('/')
    return (parseIpAdrs(ipadrs),int(maskbits))

def formatIpAdrs(ipbytes):
    """
    Format IP address string from IP address bytes
    """
    return formatIntList(ipbytes,".")

def formatNetAdrs(ipbytes,maskbits):
    """
    Format network address string from IP address bytes and mask bit count
    """
    return formatIpAdrs(ipbytes)+("/%d" % maskbits)

def mkNetMask(ipbytes,maskbits=None):
    """
    Make a network mask value as a sequence of IP address bytes

    May be called with 1 or 2 arguments:
    if 1 argument, it is a pair of (netbytes,maskbits)
    if 2 arguments, the first is just netbytes, and the second is maskbits
    """
    if not maskbits: (ipbytes,maskbits) = ipbytes
    netmask = []
    for b in ipbytes:
        m = 0
        if maskbits >= 8:
            m = 255
        elif maskbits > 0:
            m = (0, 128, 128+64, 128+64+32, 128+64+32+16, 128+64+32+16+8,
                 128+64+32+16+8+4, 128+64+32+16+8+4+2)[maskbits]
        netmask.append(m)
        maskbits -= 8
    return netmask

def mkBroadcastAddress(netbytes,maskbits=None):
    """
    Make broadcast address for a given network

    May be called with 1 or 2 arguments:
    if 1 argument, it is a pair of (netbytes,maskbits)
    if 2 arguments, the first is just netbytes, and the second is maskbits
    """
    def makeadrbyte(m, a): return (~m | a) & 0xFF
    if not maskbits: (netbytes,maskbits) = netbytes
    netmask = mkNetMask(netbytes,maskbits)
    return map(makeadrbyte, netmask, netbytes)

def ipInNetwork(ipbytes, netbytes, maskbits=None):
    """
    Test if IP address is part of given network

    May be called with 2 or 3 arguments:
    if 2 arguments, the second is a pair of (netbytes,maskbits)
    if 3 arguments, the second is just netbytes, and the third is maskbits
    """
    def testadrbyte(m, n, a): return (m & a) == (m & n)
    if not maskbits: (netbytes,maskbits) = netbytes
    netmask = mkNetMask(netbytes, maskbits)
    return all(testadrbyte, netmask, netbytes, ipbytes)

def getHostIpsAndMask():
    """
    Helper function returns list of IP networks connected to the current host.

    Each value is in the form address/maskbits, e.g. 10.0.0.0/8
    """
    result = list()
    from socket import gethostbyname_ex, gethostname
    try:
        hosts = gethostbyname_ex( gethostname( ) )
        for addr in hosts[2]:
            # map each address onto its old classful network
            byts = parseIpAdrs(addr)
            if byts[0] >= 192:
                # class C
                result.append( "%i.%i.%i.0/24" % (byts[0],byts[1],byts[2]) )
            elif byts[0] >= 128:
                # class B
                result.append( "%i.%i.0.0/16" % (byts[0],byts[1]) )
            else:
                # class A
                result.append( "%i.0.0.0/8" % (byts[0]) )
    except Exception, ex:
        _log = logging.getLogger('WebBrickLibs.MiscLib.NetUtils')
        _log.exception(ex)
    return result

# Helper functions for processing MAC addresses as lists of integers

def parseMacAdrs(macadrs):
    """
    Parse MAC address in colon-hexadecimal form, and return a sequence of 6 numbers
    """
    def hex(h): return int(h,16)
    return map(hex, macadrs.split(':'))

def formatMacAdrs(macbytes,sep=":"):
    """
    Format MAC address as colon-separated hexadecimals for webBrick command
    """
    return formatIntList(macbytes, sep, formatInt("%02X"))

# test cases

def _test():
    i = parseIpAdrs("193.123.216.121")
    x = parseIpAdrs("193.123.216.200")
    n = parseNetAdrs("193.123.216.64/26")
    b = mkBroadcastAddress(*n)
    assert formatIpAdrs(b) == "193.123.216.127"
    assert ipInNetwork(i,*n)
    assert ipInNetwork(b,*n)
    assert not ipInNetwork(x,*n)
    assert parseMacAdrs("01:34:67:9a:BC:eF") == [1,52,103,154,188,239]
    assert formatMacAdrs([1,52,103,154,188,239],sep='-') == "01-34-67-9A-BC-EF"

_test()

# End $Id: NetUtils.py 1047 2009-01-15 14:48:58Z graham $
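# --- Illustrative usage sketch (editor-added; not part of the original module)
# A worked example of the list-of-ints helpers, complementing _test() above.
# The addresses are made up; the calls use only functions defined in this
# module (Python 2, where map() returns a list).

def _example():
    net = parseNetAdrs("10.1.2.0/24")                    # ([10, 1, 2, 0], 24)
    print mkNetMask(*net)                                # [255, 255, 255, 0]
    print formatIpAdrs(mkBroadcastAddress(*net))         # 10.1.2.255
    print ipInNetwork(parseIpAdrs("10.1.2.77"), *net)    # True
    print ipInNetwork(parseIpAdrs("10.1.3.77"), *net)    # False
    print getBroadcastAddress("10.1.2.0/24")             # 10.1.2.255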
{ "content_hash": "0bfb5ba3bded99389c038a75becf7f0d", "timestamp": "", "source": "github", "line_count": 192, "max_line_length": 108, "avg_line_length": 31.838541666666668, "alnum_prop": 0.6100114510060527, "repo_name": "tectronics/admiral-jiscmrd", "id": "f032f1e069fb1062355126868be608876bf4c590", "size": "6193", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "src/AdminUIHandler/MiscLib/NetUtils.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "8097" }, { "name": "Brainfuck", "bytes": "90" }, { "name": "C", "bytes": "16" }, { "name": "CSS", "bytes": "27036" }, { "name": "HTML", "bytes": "474331" }, { "name": "JavaScript", "bytes": "3208858" }, { "name": "Mako", "bytes": "74" }, { "name": "PHP", "bytes": "6124" }, { "name": "Python", "bytes": "1759473" }, { "name": "Shell", "bytes": "67444" } ], "symlink_target": "" }
'''active twoq mixins''' from collections import deque from contextlib import contextmanager from stuf.utils import clsname from twoq.queuing import ThingsMixin, ResultMixin __all__ = ('AutoQMixin', 'ManQMixin', 'AutoResultMixin', 'ManResultMixin') class BaseQMixin(ThingsMixin): '''base active things''' def __init__(self, *things): deque_ = deque incoming = deque_(things[0]) if len(things) == 1 else deque_(things) super(BaseQMixin, self).__init__(incoming, deque_()) # set iterator self._iterator = self._iterexcept # work things self._work = deque_() # utility things self._util = deque_() def __repr__(self): getr_, list_ = lambda x: getattr(self, x), list return ( '<{}.{}([IN: {}({}) => WORK: {}({}) => UTIL: {}({}) => ' 'OUT: {}: ({})]) at {}' ).format( self.__module__, clsname(self), self._INQ, list_(getr_(self._INQ)), self._WORKQ, list_(getr_(self._WORKQ)), self._UTILQ, list_(getr_(self._UTILQ)), self._OUTQ, list_(getr_(self._OUTQ)), id(self), ) ########################################################################### ## thing length ########################################################### ########################################################################### def __len__(self): '''number of incoming things''' return len(self.incoming) def outcount(self): '''number of outgoing things''' return len(self.outgoing) ########################################################################### ## iterators ############################################################## ########################################################################### def __iter__(self): '''yield outgoing things, clearing outgoing things as it iterates''' return self.iterexcept(self.outgoing.popleft, IndexError) @property def _iterable(self): '''iterable''' return self._iterator(self._WORKQ) def _iterexcept(self, attr='_UTILQ'): ''' iterator broken on exception @param attr: things to iterate over ''' return self.iterexcept(getattr(self, attr).popleft, IndexError) def _breakcount(self, attr='_UTILQ'): ''' breakcount iterator @param attr: things to iterate over ''' dq = getattr(self, attr) return self.breakcount(dq.popleft, len(dq), IndexError,) ########################################################################### ## clear things ########################################################### ########################################################################### def _uclear(self): '''clear utility things''' self._util.clear() return self def _wclear(self): '''clear work things''' self._work.clear() return self def inclear(self): '''clear incoming things''' self.incoming.clear() return self def outclear(self): '''clear outgoing things''' self.outgoing.clear() return self ########################################################################### ## extend ################################################################# ########################################################################### def _xtend(self, things): '''extend utility things with `things` wrapped''' getattr(self, self._UTILQ).extend(things) return self def _xtendleft(self, things): '''extend left side of utility things with `things`''' getattr(self, self._UTILQ).extendleft(things) return self def _iter(self, things): '''extend work things with `things` wrapped in iterator''' getattr(self, self._UTILQ).extend(iter(things)) return self ########################################################################### ## append ################################################################# 
###########################################################################

    def _append(self, things):
        '''append `things` to utility things'''
        getattr(self, self._UTILQ).append(things)
        return self

    def _appendleft(self, things):
        '''append `things` to left side of utility things'''
        getattr(self, self._UTILQ).appendleft(things)
        return self

    ###########################################################################
    ## context rotation #######################################################
    ###########################################################################

    @contextmanager
    def ctx2(self, **kw):
        '''swap to two-armed context'''
        self.swap(
            outq=kw.get(self._OUTCFG, self._INVAR), context=self.ctx2, **kw
        )
        getr_ = lambda x: getattr(self, x)
        outq = getr_(self._OUTQ)
        utilq = getr_(self._UTILQ)
        workq = getr_(self._WORKQ)
        # clear all work things
        workq.clear()
        # extend work things with outgoing things
        workq.extend(outq)
        # swap iterator
        self._iterator = self._breakcount
        yield
        # clear outgoing things if so configured
        if self._clearout:
            outq.clear()
        # extend outgoing things with utility things
        outq.extend(utilq)
        # clear utility things
        utilq.clear()
        # return to global context
        self.reswap()

    @contextmanager
    def ctx3(self, **kw):
        '''swap to three-armed context'''
        self.swap(
            utilq=kw.get(self._WORKCFG, self._WORKVAR), context=self.ctx3, **kw
        )
        getr_ = lambda x: getattr(self, x)
        outq = getr_(self._OUTQ)
        utilq = getr_(self._UTILQ)
        workq = getr_(self._WORKQ)
        # clear work things
        workq.clear()
        # extend work things with incoming things
        workq.extend(getr_(self._INQ))
        # swap iterators
        self._iterator = self._breakcount
        yield
        # clear outgoing things if so configured
        if self._clearout:
            outq.clear()
        # extend outgoing things with utility things
        outq.extend(utilq)
        # clear utility things
        utilq.clear()
        # return to global context
        self.reswap()

    @contextmanager
    def ctx4(self, **kw):
        '''swap to four-armed context'''
        self.swap(context=self.ctx4, **kw)
        getr_ = lambda x: getattr(self, x)
        outq = getr_(self._OUTQ)
        utilq = getr_(self._UTILQ)
        workq = getr_(self._WORKQ)
        # clear work things
        workq.clear()
        # extend work things with incoming things
        workq.extend(getr_(self._INQ))
        # swap iterators
        self._iterator = self._iterexcept
        yield
        # clear outgoing things if so configured
        if self._clearout:
            outq.clear()
        # extend outgoing things with utility things
        outq.extend(utilq)
        # clear utility things
        utilq.clear()
        # return to global context
        self.reswap()

    @contextmanager
    def autoctx(self, **kw):
        '''swap to auto-synchronizing context'''
        self.swap(context=self.autoctx, **kw)
        getr_ = lambda x: getattr(self, x)
        outq = getr_(self._OUTQ)
        utilq = getr_(self._UTILQ)
        workq = getr_(self._WORKQ)
        inq = getr_(self._INQ)
        # clear work things
        workq.clear()
        # extend work things with incoming things
        workq.extend(inq)
        # swap iterators
        self._iterator = self._iterexcept
        yield
        # clear outgoing things if so configured
        if self._clearout:
            outq.clear()
        outq.extend(utilq)
        # clear incoming things
        inq.clear()
        inq.extend(utilq)
        # clear utility things
        utilq.clear()
        # return to global context
        self.reswap()

    def ro(self):
        '''swap to read-only context'''
        with self.ctx3(outq=self._UTILVAR):
            self._xtend(self._iterable)
        with self.ctx1(hard=True, workq=self._UTILVAR):
            return self


class AutoQMixin(BaseQMixin):

    '''auto-balancing queue mixin'''

    _default_context = 'autoctx'


class ManQMixin(BaseQMixin):

    '''manually balanced queue mixin'''

    _default_context = 'ctx4'


class EndMixin(ResultMixin):

    '''result things mixin'''

    def end(self):
        '''return outgoing things then clear out everything'''
# return to default context self.unswap() wrap, outgoing = self._wrapper, self.outgoing out = self.outgoing.pop() if len(outgoing) == 1 else wrap(outgoing) # clear every last thing self.clear() return out def value(self): '''return outgoing things and clear outgoing things''' # return to default context self.unswap() wrap, outgoing = self._wrapper, self.outgoing out = self.outgoing.pop() if len(outgoing) == 1 else wrap(outgoing) # clear outgoing things self.outclear() return out class AutoResultMixin(AutoQMixin, EndMixin): '''auto-balancing manipulation things (with results extractor) mixin''' class ManResultMixin(ManQMixin, EndMixin): '''manually balanced things (with results extractor) mixin'''
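# --- Illustrative sketch (editor-added; not part of twoq) --------------------
# A stripped-down, standalone illustration of the incoming -> work -> utility
# -> outgoing rotation that the context managers above perform.  It does not
# use twoq's real swap()/reswap() machinery (defined in twoq.queuing); the
# class and method names below are hypothetical.

from collections import deque
from contextlib import contextmanager


class _FourDequeSketch(object):

    def __init__(self, *things):
        self.incoming = deque(things)
        self.outgoing = deque()
        self._work = deque()
        self._util = deque()

    @contextmanager
    def rotate(self):
        # load work things from incoming things (cf. ctx4/autoctx above)
        self._work.clear()
        self._work.extend(self.incoming)
        yield self
        # drain utility things into outgoing things on exit
        self.outgoing.clear()
        self.outgoing.extend(self._util)
        self._util.clear()

    def xtend(self, things):
        self._util.extend(things)


# e.g. doubling every queued value:
q = _FourDequeSketch(1, 2, 3)
with q.rotate() as ctx:
    ctx.xtend(2 * x for x in ctx._work)
assert list(q.outgoing) == [2, 4, 6]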
{ "content_hash": "417e5afcd926ea1973719a9248c9e595", "timestamp": "", "source": "github", "line_count": 312, "max_line_length": 79, "avg_line_length": 30.846153846153847, "alnum_prop": 0.48794679966749793, "repo_name": "lcrees/twoq", "id": "3987655b1c5c41aca9e543a745c425673ab03479", "size": "9648", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "twoq/active/mixins.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "141428" } ], "symlink_target": "" }